Repository: sodafoundation/delfin
Branch: master
Commit: 14c1e86b6a86
Files: 416
Total size: 3.6 MB

Directory structure:
gitextract_3m8g0ek4/
├── .coveragerc
├── .github/
│   ├── ISSUE_TEMPLATE.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   └── workflows/
│       ├── delfin_ci.yml
│       └── delfin_e2e_test.yml
├── .gitignore
├── CHANGELOG/
│   ├── CHANGELOG-v1.0.0.md
│   └── CHANGELOG-v1.1.0.md
├── Dockerfile
├── LICENSE
├── README.md
├── codecov.yml
├── delfin/
│   ├── __init__.py
│   ├── alert_manager/
│   │   ├── __init__.py
│   │   ├── alert_processor.py
│   │   ├── constants.py
│   │   ├── rpcapi.py
│   │   ├── snmp_validator.py
│   │   └── trap_receiver.py
│   ├── api/
│   │   ├── __init__.py
│   │   ├── api_utils.py
│   │   ├── common/
│   │   │   ├── __init__.py
│   │   │   └── wsgi.py
│   │   ├── contrib/
│   │   │   └── __init__.py
│   │   ├── extensions.py
│   │   ├── middlewares.py
│   │   ├── schemas/
│   │   │   ├── __init__.py
│   │   │   ├── access_info.py
│   │   │   ├── alert_source.py
│   │   │   ├── alerts.py
│   │   │   ├── storage_capabilities_schema.py
│   │   │   └── storages.py
│   │   ├── v1/
│   │   │   ├── __init__.py
│   │   │   ├── access_info.py
│   │   │   ├── alert_source.py
│   │   │   ├── alerts.py
│   │   │   ├── controllers.py
│   │   │   ├── disks.py
│   │   │   ├── filesystems.py
│   │   │   ├── masking_views.py
│   │   │   ├── port_groups.py
│   │   │   ├── ports.py
│   │   │   ├── qtrees.py
│   │   │   ├── quotas.py
│   │   │   ├── router.py
│   │   │   ├── shares.py
│   │   │   ├── storage_host_groups.py
│   │   │   ├── storage_host_initiators.py
│   │   │   ├── storage_hosts.py
│   │   │   ├── storage_pools.py
│   │   │   ├── storages.py
│   │   │   ├── volume_groups.py
│   │   │   └── volumes.py
│   │   ├── validation/
│   │   │   ├── __init__.py
│   │   │   ├── parameter_types.py
│   │   │   └── validators.py
│   │   └── views/
│   │       ├── __init__.py
│   │       ├── access_info.py
│   │       ├── alert_source.py
│   │       ├── alerts.py
│   │       ├── controllers.py
│   │       ├── disks.py
│   │       ├── filesystems.py
│   │       ├── masking_views.py
│   │       ├── port_groups.py
│   │       ├── ports.py
│   │       ├── qtrees.py
│   │       ├── quotas.py
│   │       ├── shares.py
│   │       ├── storage_host_groups.py
│   │       ├── storage_host_initiators.py
│   │       ├── storage_hosts.py
│   │       ├── storage_pools.py
│   │       ├── storages.py
│   │       ├── volume_groups.py
│   │       └── volumes.py
│   ├── cmd/
│   │   ├── __init__.py
│   │   ├── alert.py
│   │   ├── api.py
│   │   └── task.py
│   ├── common/
│   │   ├── __init__.py
│   │   ├── alert_util.py
│   │   ├── config.py
│   │   ├── constants.py
│   │   └── sqlalchemyutils.py
│   ├── context.py
│   ├── coordination.py
│   ├── cryptor.py
│   ├── db/
│   │   ├── __init__.py
│   │   ├── api.py
│   │   ├── base.py
│   │   └── sqlalchemy/
│   │       ├── __init__.py
│   │       ├── api.py
│   │       └── models.py
│   ├── drivers/
│   │   ├── __init__.py
│   │   ├── api.py
│   │   ├── dell_emc/
│   │   │   ├── __init__.py
│   │   │   ├── power_store/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── consts.py
│   │   │   │   ├── power_store.py
│   │   │   │   └── rest_handler.py
│   │   │   ├── scaleio/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── alert_consts.py
│   │   │   │   ├── consts.py
│   │   │   │   ├── rest_handler.py
│   │   │   │   └── scaleio_stor.py
│   │   │   ├── unity/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── alert_handler.py
│   │   │   │   ├── consts.py
│   │   │   │   ├── rest_handler.py
│   │   │   │   └── unity.py
│   │   │   ├── vmax/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── alert_handler/
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── alert_mapper.py
│   │   │   │   │   ├── oid_mapper.py
│   │   │   │   │   ├── snmp_alerts.py
│   │   │   │   │   └── unisphere_alerts.py
│   │   │   │   ├── client.py
│   │   │   │   ├── constants.py
│   │   │   │   ├── perf_utils.py
│   │   │   │   ├── rest.py
│   │   │   │   └── vmax.py
│   │   │   ├── vnx/
│   │   │   │   ├── __init__.py
│   │   │   │   └── vnx_block/
│   │   │   │       ├── __init__.py
│   │   │   │       ├── alert_handler.py
│   │   │   │       ├── component_handler.py
│   │   │   │       ├── consts.py
│   │   │   │       ├── navi_handler.py
│   │   │   │       ├── navicli_client.py
│   │   │   │       └── vnx_block.py
│   │   │   └── vplex/
│   │   │       ├── __init__.py
│   │   │       ├── alert_handler.py
│   │   │       ├── consts.py
│   │   │       ├── rest_handler.py
│   │   │       └── vplex_stor.py
│   │   ├── driver.py
│   │   ├── fake_storage/
│   │   │   └── __init__.py
│   │   ├── fujitsu/
│   │   │   ├── __init__.py
│   │   │   └── eternus/
│   │   │       ├── __init__.py
│   │   │       ├── cli_handler.py
│   │   │       ├── consts.py
│   │   │       ├── eternus_ssh_client.py
│   │   │       └── eternus_stor.py
│   │   ├── h3c/
│   │   │   ├── __init__.py
│   │   │   └── unistor_cf/
│   │   │       ├── __init__.py
│   │   │       └── unistor_cf.py
│   │   ├── helper.py
│   │   ├── hitachi/
│   │   │   ├── __init__.py
│   │   │   ├── hnas/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── constants.py
│   │   │   │   ├── hds_nas.py
│   │   │   │   └── nas_handler.py
│   │   │   └── vsp/
│   │   │       ├── __init__.py
│   │   │       ├── consts.py
│   │   │       ├── rest_handler.py
│   │   │       └── vsp_stor.py
│   │   ├── hpe/
│   │   │   ├── __init__.py
│   │   │   ├── hpe_3par/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── alert_handler.py
│   │   │   │   ├── component_handler.py
│   │   │   │   ├── consts.py
│   │   │   │   ├── hpe_3parstor.py
│   │   │   │   ├── rest_handler.py
│   │   │   │   └── ssh_handler.py
│   │   │   └── hpe_msa/
│   │   │       ├── __init__.py
│   │   │       ├── consts.py
│   │   │       ├── hpe_msastor.py
│   │   │       └── ssh_handler.py
│   │   ├── huawei/
│   │   │   ├── __init__.py
│   │   │   └── oceanstor/
│   │   │       ├── __init__.py
│   │   │       ├── alert_handler.py
│   │   │       ├── consts.py
│   │   │       ├── oceanstor.py
│   │   │       ├── oid_mapper.py
│   │   │       └── rest_client.py
│   │   ├── ibm/
│   │   │   ├── __init__.py
│   │   │   ├── ds8k/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── alert_handler.py
│   │   │   │   ├── consts.py
│   │   │   │   ├── ds8k.py
│   │   │   │   └── rest_handler.py
│   │   │   └── storwize_svc/
│   │   │       ├── __init__.py
│   │   │       ├── consts.py
│   │   │       ├── ssh_handler.py
│   │   │       └── storwize_svc.py
│   │   ├── inspur/
│   │   │   ├── __init__.py
│   │   │   └── as5500/
│   │   │       ├── __init__.py
│   │   │       └── as5500.py
│   │   ├── macro_san/
│   │   │   ├── __init__.py
│   │   │   └── ms/
│   │   │       ├── __init__.py
│   │   │       ├── consts.py
│   │   │       ├── file/
│   │   │       │   └── __init__.py
│   │   │       ├── macro_ssh_client.py
│   │   │       ├── ms_handler.py
│   │   │       └── ms_stor.py
│   │   ├── manager.py
│   │   ├── netapp/
│   │   │   ├── __init__.py
│   │   │   └── dataontap/
│   │   │       ├── __init__.py
│   │   │       ├── cluster_mode.py
│   │   │       ├── constants.py
│   │   │       ├── mapping_handler.py
│   │   │       ├── netapp_handler.py
│   │   │       └── performance_handler.py
│   │   ├── pure/
│   │   │   ├── __init__.py
│   │   │   └── flasharray/
│   │   │       ├── __init__.py
│   │   │       ├── consts.py
│   │   │       ├── pure_flasharray.py
│   │   │       └── rest_handler.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       ├── performance_file/
│   │       │   ├── __init__.py
│   │       │   ├── macro_san/
│   │       │   │   └── __init__.py
│   │       │   ├── svc/
│   │       │   │   └── __init__.py
│   │       │   └── vnx_block/
│   │       │       └── __init__.py
│   │       ├── rest_client.py
│   │       ├── ssh_client.py
│   │       └── tools.py
│   ├── exception.py
│   ├── exporter/
│   │   ├── __init__.py
│   │   ├── base_exporter.py
│   │   ├── example.py
│   │   ├── kafka/
│   │   │   ├── __init__.py
│   │   │   ├── exporter.py
│   │   │   └── kafka.py
│   │   └── prometheus/
│   │       ├── __init__.py
│   │       ├── alert_manager.py
│   │       ├── exporter.py
│   │       ├── exporter_server.py
│   │       └── prometheus.py
│   ├── i18n.py
│   ├── leader_election/
│   │   ├── __init__.py
│   │   ├── distributor/
│   │   │   ├── __init__.py
│   │   │   ├── perf_job_manager.py
│   │   │   └── task_distributor.py
│   │   ├── factory.py
│   │   ├── interface.py
│   │   └── tooz/
│   │       ├── __init__.py
│   │       ├── callback.py
│   │       └── leader_elector.py
│   ├── manager.py
│   ├── rpc.py
│   ├── service.py
│   ├── ssl_utils.py
│   ├── task_manager/
│   │   ├── __init__.py
│   │   ├── manager.py
│   │   ├── metrics_manager.py
│   │   ├── metrics_rpcapi.py
│   │   ├── perf_job_controller.py
│   │   ├── rpcapi.py
│   │   ├── scheduler/
│   │   │   ├── __init__.py
│   │   │   ├── schedule_manager.py
│   │   │   └── schedulers/
│   │   │       ├── __init__.py
│   │   │       └── telemetry/
│   │   │           ├── __init__.py
│   │   │           ├── failed_performance_collection_handler.py
│   │   │           ├── job_handler.py
│   │   │           └── performance_collection_handler.py
│   │   ├── subprocess_manager.py
│   │   ├── subprocess_rpcapi.py
│   │   └── tasks/
│   │       ├── __init__.py
│   │       ├── alerts.py
│   │       ├── resources.py
│   │       └── telemetry.py
│   ├── test.py
│   ├── tests/
│   │   ├── __init__.py
│   │   ├── e2e/
│   │   │   ├── GetResources.robot
│   │   │   ├── GetStorage.robot
│   │   │   ├── README.md
│   │   │   ├── RegisterStorage.robot
│   │   │   ├── RemoveStorage.robot
│   │   │   ├── UpdateAccessInfo.robot
│   │   │   ├── __init__.py
│   │   │   ├── test.json
│   │   │   ├── test_e2e.sh
│   │   │   └── testdriver/
│   │   │       ├── __init__.py
│   │   │       └── storage.json
│   │   └── unit/
│   │       ├── __init__.py
│   │       ├── alert_manager/
│   │       │   ├── __init__.py
│   │       │   ├── fakes.py
│   │       │   ├── test_alert_processor.py
│   │       │   ├── test_snmp_validator.py
│   │       │   └── test_trap_receiver.py
│   │       ├── api/
│   │       │   ├── __init__.py
│   │       │   ├── extensions/
│   │       │   │   ├── __init__.py
│   │       │   │   └── foxinsocks.py
│   │       │   ├── fakes.py
│   │       │   ├── test_api_validation.py
│   │       │   ├── test_extensions.py
│   │       │   ├── test_middlewares.py
│   │       │   ├── test_wsgi.py
│   │       │   └── v1/
│   │       │       ├── __init__.py
│   │       │       ├── test_access_info.py
│   │       │       ├── test_alert_source.py
│   │       │       ├── test_alerts.py
│   │       │       ├── test_storage_pools.py
│   │       │       ├── test_storages.py
│   │       │       └── test_volumes.py
│   │       ├── conf_fixture.py
│   │       ├── db/
│   │       │   ├── __init__.py
│   │       │   └── test_db_api.py
│   │       ├── drivers/
│   │       │   ├── __init__.py
│   │       │   ├── dell_emc/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── power_store/
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   └── test_power_store.py
│   │       │   │   ├── scaleio/
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   ├── test_constans.py
│   │       │   │   │   └── test_scaleio_stor.py
│   │       │   │   ├── unity/
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   └── test_emc_unity.py
│   │       │   │   ├── vmax/
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   ├── test_alert_handler.py
│   │       │   │   │   └── test_vmax.py
│   │       │   │   ├── vnx/
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   └── vnx_block/
│   │       │   │   │       ├── __init__.py
│   │       │   │   │       └── test_vnx_block.py
│   │       │   │   └── vplex/
│   │       │   │       ├── __init__.py
│   │       │   │       └── test_emc_vplex.py
│   │       │   ├── fujitsu/
│   │       │   │   ├── __init__.py
│   │       │   │   └── eternus/
│   │       │   │       ├── __init__.py
│   │       │   │       └── test_eternus_stor.py
│   │       │   ├── hitachi/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── hnas/
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   ├── constants.py
│   │       │   │   │   └── test_hnas.py
│   │       │   │   └── vsp/
│   │       │   │       ├── __init__.py
│   │       │   │       └── test_hitachi_vspstor.py
│   │       │   ├── hpe/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── hpe_3par/
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   └── test_hpe_3parstor.py
│   │       │   │   └── hpe_msa/
│   │       │   │       ├── __init__.py
│   │       │   │       ├── test_constans.py
│   │       │   │       └── test_hpe_msastor.py
│   │       │   ├── huawei/
│   │       │   │   ├── __init__.py
│   │       │   │   └── oceanstor/
│   │       │   │       ├── __init__.py
│   │       │   │       ├── test_alert_handler.py
│   │       │   │       ├── test_oceanstor.py
│   │       │   │       └── test_rest_client.py
│   │       │   ├── ibm/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── ibm_ds8k/
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   └── test_ibm_ds8k.py
│   │       │   │   └── storwize_svc/
│   │       │   │       ├── __init__.py
│   │       │   │       └── test_ibm_storwize_svc.py
│   │       │   ├── macro_san/
│   │       │   │   ├── __init__.py
│   │       │   │   └── ms/
│   │       │   │       ├── __init__.py
│   │       │   │       └── test_ms_stor.py
│   │       │   ├── netapp/
│   │       │   │   ├── __init__.py
│   │       │   │   └── netapp_ontap/
│   │       │   │       ├── __init__.py
│   │       │   │       ├── test_constans.py
│   │       │   │       └── test_netapp.py
│   │       │   ├── pure/
│   │       │   │   ├── __init__.py
│   │       │   │   └── flasharray/
│   │       │   │       ├── __init__.py
│   │       │   │       └── test_pure_flasharray.py
│   │       │   ├── test_api.py
│   │       │   └── test_manager.py
│   │       ├── exporter/
│   │       │   └── prometheus/
│   │       │       ├── __init__.py
│   │       │       └── test_prometheus.py
│   │       ├── fake_data.py
│   │       ├── fake_notifier.py
│   │       ├── leader_election/
│   │       │   ├── __init__.py
│   │       │   └── distributor/
│   │       │       ├── __init__.py
│   │       │       └── test_task_distributor.py
│   │       ├── task_manager/
│   │       │   ├── __init__.py
│   │       │   ├── scheduler/
│   │       │   │   ├── __init__.py
│   │       │   │   ├── schedulers/
│   │       │   │   │   ├── __init__.py
│   │       │   │   │   └── telemetry/
│   │       │   │   │       ├── __init__.py
│   │       │   │   │       ├── test_failed_performance_collection_handler.py
│   │       │   │   │       ├── test_job_handler.py
│   │       │   │   │       └── test_performance_collection_handler.py
│   │       │   │   └── test_scheduler.py
│   │       │   ├── test_alert_task.py
│   │       │   ├── test_resources.py
│   │       │   └── test_telemetry.py
│   │       ├── test_context.py
│   │       ├── test_coordination.py
│   │       ├── test_manager.py
│   │       ├── test_rpc.py
│   │       ├── utils.py
│   │       └── wsgi/
│   │           ├── __init__.py
│   │           └── test_common.py
│   ├── utils.py
│   ├── version.py
│   └── wsgi/
│       ├── __init__.py
│       └── common.py
├── docker-compose.yml
├── etc/
│   └── delfin/
│       ├── api-paste.ini
│       └── delfin.conf
├── installer/
│   ├── README.md
│   ├── ansible/
│   │   ├── clean.yml
│   │   ├── group_vars/
│   │   │   └── delfin.yml
│   │   ├── local.hosts
│   │   ├── roles/
│   │   │   ├── cleaner/
│   │   │   │   ├── scenarios/
│   │   │   │   │   ├── delfin.yml
│   │   │   │   │   └── release.yml
│   │   │   │   └── tasks/
│   │   │   │       └── main.yml
│   │   │   └── delfin-installer/
│   │   │       ├── scenarios/
│   │   │       │   ├── container.yml
│   │   │       │   ├── rabbitmq.yml
│   │   │       │   ├── redis.yml
│   │   │       │   ├── source-code.yml
│   │   │       │   └── start-delfin.yml
│   │   │       └── tasks/
│   │   │           └── main.yml
│   │   ├── script/
│   │   │   ├── create_db.py
│   │   │   └── virtualenv3_exec.j2
│   │   └── site.yml
│   ├── helper.py
│   ├── install
│   ├── install.conf
│   ├── install_delfin.py
│   ├── install_dependencies.sh
│   ├── precheck
│   ├── uninstall
│   └── util.sh
├── openapi-spec/
│   └── swagger.yaml
├── requirements.txt
├── script/
│   ├── create_db.py
│   └── start.sh
├── setup.cfg
├── setup.py
├── test-requirements.txt
└── tox.ini

================================================
FILE CONTENTS
================================================

================================================
FILE: .coveragerc
================================================
[run]
branch = true
source = delfin
omit = delfin/tests/*
================================================
FILE: .github/ISSUE_TEMPLATE.md
================================================
**Is this a BUG REPORT or FEATURE REQUEST?**:

> Uncomment only one, leave it on its own line:
>
> /kind bug
> /kind feature

**What happened**:

**What you expected to happen**:

**How to reproduce it (as minimally and precisely as possible)**:

**Anything else we need to know?**:

**Environment**:
- Delfin(release/branch) version:
- OS (e.g. from /etc/os-release):
- Kernel (e.g. `uname -a`):
- Install tools:
- Others:

================================================
FILE: .github/PULL_REQUEST_TEMPLATE.md
================================================
**What this PR does / why we need it**:

**Which issue this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
```

================================================
FILE: .github/workflows/delfin_ci.yml
================================================
name: Delfin CI

on: [push, pull_request, workflow_dispatch]

jobs:
  build:
    runs-on: ${{ matrix.platform }}
    strategy:
      max-parallel: 6
      matrix:
        platform: [ubuntu-20.04]
        python-version: [ 3.8 ]
    steps:
    - uses: actions/checkout@v2
    - name: Install Python version ${{ matrix.python-version }}
      uses: actions/setup-python@v1
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install -r requirements.txt
        pip install -r test-requirements.txt
        pip install tox codecov
    - name: Unit test with tox
      run: tox
    - name: Test coverage with codecov
      run: codecov

================================================
FILE: .github/workflows/delfin_e2e_test.yml
================================================
name: Delfin E2E Test

on: [push, pull_request, workflow_dispatch]

jobs:
  test:
    runs-on: ${{ matrix.platform }}
    strategy:
      max-parallel: 6
      matrix:
        platform: [ubuntu-20.04]
        python-version: [ 3.8 ]
    steps:
    - name: Checkout delfin code
      uses: actions/checkout@v2
    - name: Install Python version ${{ matrix.python-version }}
      uses: actions/setup-python@v1
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install -r requirements.txt
        pip install -r test-requirements.txt
        pip install tox codecov
    - name: E2E Testing - Add Test Driver to Delfin
      run: |
        str="\ \ \ \ \ \ \ \ \ \ \ \ 'test_vendor test_model = delfin.tests.e2e.testdriver:TestDriver',"
        sed -i "/FakeStorageDriver',/ a $str" ./setup.py
      shell: bash
    - name: E2E Testing - Install RabbitMQ
      uses: getong/rabbitmq-action@v1.2
      with:
        rabbitmq version: '3.8.2-management-alpine'
        host port: 5672
        rabbitmq user: 'guest'
        rabbitmq password: 'guest'
    - name: E2E Testing - Install Redis
      uses: supercharge/redis-github-action@1.2.0
      with:
        redis-version: 6
    - name: E2E Testing - Build and Deploy Delfin with Test driver
      run: |
        sudo mkdir -p /var/lib/delfin
        sudo chmod 0777 /var/lib/delfin
        sudo mkdir -p /etc/delfin
        sudo chmod 0777 /etc/delfin
        python3 setup.py install
        cp ./etc/delfin/api-paste.ini /etc/delfin/
        python3 ./script/create_db.py --config-file ./etc/delfin/delfin.conf
        sleep 1
        python3 ./delfin/cmd/task.py --config-file ./etc/delfin/delfin.conf > /tmp/task.log 2>&1 &
        python3 ./delfin/cmd/alert.py --config-file ./etc/delfin/delfin.conf > /tmp/alert.log 2>&1 &
        python3 ./delfin/cmd/api.py --config-file ./etc/delfin/delfin.conf > /tmp/api.log 2>&1 &
      shell: bash
    - name: E2E Testing - Run RobotFramework
      run: |
        sleep 3
        pip install robotframework
        pip install robotframework-requests
        pip install robotframework-jsonlibrary
        DELFIN_DIR=`pwd`
        TOP_DIR="${DELFIN_DIR}/delfin/tests/e2e"
        ORIG_PATH='"storage.json"'
        FILE_PATH="${TOP_DIR}/testdriver/storage.json"
        sed -i "s|${ORIG_PATH}|\"${FILE_PATH}\"|g" $TOP_DIR/test.json
        sleep 1
        robot delfin/tests/e2e
      shell: bash
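The "Add Test Driver to Delfin" step works because delfin discovers storage drivers through setuptools entry points keyed by `vendor model`; the sed command above simply registers `delfin.tests.e2e.testdriver:TestDriver` next to the built-in `FakeStorageDriver`. As a hedged sketch only — the exact abstract surface lives in `delfin/drivers/driver.py` and the entry-point namespace should be checked against `setup.py` — such a driver might look like this, with all field values being placeholders:

```python
# Hypothetical minimal driver, modeled loosely on delfin's fake_storage
# driver. Assumes delfin.drivers.driver.StorageDriver is the base class.
from delfin.drivers import driver


class TestDriver(driver.StorageDriver):
    """Registered in setup.py as 'test_vendor test_model = ...:TestDriver'."""

    def get_storage(self, context):
        # Return a storage model dict; the fields mirror those the
        # registration API expects from any backend driver.
        return {
            'name': 'test_storage',            # placeholder values below
            'vendor': 'test_vendor',
            'model': 'test_model',
            'status': 'normal',
            'serial_number': 'test-sn-0001',
            'firmware_version': '1.0.0',
            'total_capacity': 10 * 1024 ** 4,
            'used_capacity': 2 * 1024 ** 4,
            'free_capacity': 8 * 1024 ** 4,
            'raw_capacity': 12 * 1024 ** 4,
        }

    def list_storage_pools(self, context):
        return []

    def list_volumes(self, context):
        return []

    def list_alerts(self, context, query_para=None):
        return []

    def parse_alert(self, context, alert):
        return {}
```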
================================================
FILE: .gitignore
================================================
# IDE config file
.idea

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

================================================
FILE: CHANGELOG/CHANGELOG-v1.0.0.md
================================================
# Changelog

## [v1.0.0](https://github.com/sodafoundation/delfin/tree/v1.0.0) (2020-09-29)

[Full Changelog](https://github.com/sodafoundation/delfin/compare/v0.8.0...v1.0.0)

**Merged pull requests:**

- Fixing config path to sync with ansible installer [\#346](https://github.com/sodafoundation/delfin/pull/346) ([PravinRanjan10](https://github.com/PravinRanjan10))
- Syncing dev branch to master [\#345](https://github.com/sodafoundation/delfin/pull/345) ([PravinRanjan10](https://github.com/PravinRanjan10))
- Standalone Installer script for delfin [\#342](https://github.com/sodafoundation/delfin/pull/342) ([PravinRanjan10](https://github.com/PravinRanjan10))
- Sync master to performance collection dev branch [\#339](https://github.com/sodafoundation/delfin/pull/339) ([NajmudheenCT](https://github.com/NajmudheenCT))
- Updating Fake driver to sync with vmax model [\#338](https://github.com/sodafoundation/delfin/pull/338) ([PravinRanjan10](https://github.com/PravinRanjan10))

## [v0.8.0](https://github.com/sodafoundation/delfin/tree/v0.8.0) (2020-09-28)

[Full Changelog](https://github.com/sodafoundation/delfin/compare/v0.6.0...v0.8.0)

**Merged pull requests:**

- Modifying some exception in VMAX and schema validation of ssh acces info [\#343](https://github.com/sodafoundation/delfin/pull/343) ([NajmudheenCT](https://github.com/NajmudheenCT))

## [v0.6.0](https://github.com/sodafoundation/delfin/tree/v0.6.0) (2020-09-21)

[Full Changelog](https://github.com/sodafoundation/delfin/compare/v0.6.1...v0.6.0)

**Merged pull requests:**
- Code improvements [\#335](https://github.com/sodafoundation/delfin/pull/335) ([sushanthakumar](https://github.com/sushanthakumar))
- Fix static code check tools function depth defect [\#331](https://github.com/sodafoundation/delfin/pull/331) ([joseph-v](https://github.com/joseph-v))
- Fix static code check tool defects [\#330](https://github.com/sodafoundation/delfin/pull/330) ([joseph-v](https://github.com/joseph-v))
- Fix VMAX establish rest session [\#324](https://github.com/sodafoundation/delfin/pull/324) ([joseph-v](https://github.com/joseph-v))
- Fix volume name in VMAX driver [\#323](https://github.com/sodafoundation/delfin/pull/323) ([joseph-v](https://github.com/joseph-v))
- Remove plain text password caching in drivers [\#322](https://github.com/sodafoundation/delfin/pull/322) ([joseph-v](https://github.com/joseph-v))
- Correct input argument of StorageBackendException in oceanstor [\#317](https://github.com/sodafoundation/delfin/pull/317) ([joseph-v](https://github.com/joseph-v))

## [v0.6.1](https://github.com/sodafoundation/delfin/tree/v0.6.1) (2020-09-21)

[Full Changelog](https://github.com/sodafoundation/delfin/compare/v0.4.0...v0.6.1)

**Fixed bugs:**

- sync\_status is always 'synced' in the reponse of a registration [\#241](https://github.com/sodafoundation/delfin/issues/241)
- \[task manager\] Sync call stuck when rabbit-mq server is not running [\#129](https://github.com/sodafoundation/delfin/issues/129)

**Closed issues:**

- Dell EMC VMAX volume name is different from Unisphere dashboard [\#332](https://github.com/sodafoundation/delfin/issues/332)
- Encrypt password before caching in drivers [\#329](https://github.com/sodafoundation/delfin/issues/329)

**Merged pull requests:**

- Performance metric-config-update API for delfin [\#333](https://github.com/sodafoundation/delfin/pull/333) ([PravinRanjan10](https://github.com/PravinRanjan10))
- VMAX driver Performance collection: Initial framework and array level for metrics collection [\#326](https://github.com/sodafoundation/delfin/pull/326) ([NajmudheenCT](https://github.com/NajmudheenCT))
- Performance-collection framework for delfin [\#325](https://github.com/sodafoundation/delfin/pull/325) ([PravinRanjan10](https://github.com/PravinRanjan10))
- Query para driver changes for list alert api [\#319](https://github.com/sodafoundation/delfin/pull/319) ([sushanthakumar](https://github.com/sushanthakumar))
- Exception handling for delete snmp trap config [\#318](https://github.com/sodafoundation/delfin/pull/318) ([sushanthakumar](https://github.com/sushanthakumar))
- Alert sync api changes [\#316](https://github.com/sodafoundation/delfin/pull/316) ([sushanthakumar](https://github.com/sushanthakumar))
- Fix static code check defects [\#315](https://github.com/sodafoundation/delfin/pull/315) ([joseph-v](https://github.com/joseph-v))
- Fix warnings from static analyze tool [\#314](https://github.com/sodafoundation/delfin/pull/314) ([joseph-v](https://github.com/joseph-v))
- Query para update for list alert api [\#312](https://github.com/sodafoundation/delfin/pull/312) ([sushanthakumar](https://github.com/sushanthakumar))

## [v0.4.0](https://github.com/sodafoundation/delfin/tree/v0.4.0) (2020-08-28)

[Full Changelog](https://github.com/sodafoundation/delfin/compare/v0.2.0...v0.4.0)

**Implemented enhancements:**
- Need a mechanism to support SSL certificate verify for HTTPS request from driver to storage device. [\#227](https://github.com/sodafoundation/delfin/issues/227)

**Fixed bugs:**

- \[Driver\] Create extended exceptions for StorageBackendException [\#184](https://github.com/sodafoundation/delfin/issues/184)
- \[Alert Manager\] Irrelevant fields are shown as null in GET alert source [\#172](https://github.com/sodafoundation/delfin/issues/172)
- \[VMAX driver\] Firmware version is missing [\#147](https://github.com/sodafoundation/delfin/issues/147)
- VMAX volume details for volumes without Storage Group [\#74](https://github.com/sodafoundation/delfin/issues/74)
- In api.py: Change function name discover\_storage to update\_storage\_driver [\#69](https://github.com/sodafoundation/delfin/issues/69)

**Closed issues:**

- \[Alert Manager\] Clear alert implementation for EMC Vmax [\#261](https://github.com/sodafoundation/delfin/issues/261)
- \[Alert Manager\] Clear alert implementation for Huawei Oceanstor [\#260](https://github.com/sodafoundation/delfin/issues/260)
- \[Alert Manager\] Clear alert analysis for storage backends [\#259](https://github.com/sodafoundation/delfin/issues/259)
- \[Alert Manager\] Get alert analysis for storage backends [\#258](https://github.com/sodafoundation/delfin/issues/258)
- \[Alert Manager\] Alert specification check for other storage backends [\#249](https://github.com/sodafoundation/delfin/issues/249)
- \[Alert Manager\]Improve readability for alert model fields [\#239](https://github.com/sodafoundation/delfin/issues/239)
- Update Project ReadMe [\#231](https://github.com/sodafoundation/delfin/issues/231)
- Exporting alert model to export manager [\#126](https://github.com/sodafoundation/delfin/issues/126)
- \[Alert manager\] Load all custom mibs from configured path [\#114](https://github.com/sodafoundation/delfin/issues/114)
- \[Alert Manager\] Clear alert at backend [\#99](https://github.com/sodafoundation/delfin/issues/99)
- \[task manager\] Push resource data to Exporter [\#93](https://github.com/sodafoundation/delfin/issues/93)
- Handle the optimization issues in pool update [\#55](https://github.com/sodafoundation/delfin/issues/55)
- Handle multi node use cases in Driver Manager [\#50](https://github.com/sodafoundation/delfin/issues/50)
- Not correct behaviour of log info message [\#46](https://github.com/sodafoundation/delfin/issues/46)

**Merged pull requests:**

- Clear alert fix in hpe 3par driver [\#309](https://github.com/sodafoundation/delfin/pull/309) ([sushanthakumar](https://github.com/sushanthakumar))
- Alert source configuration range changes [\#308](https://github.com/sodafoundation/delfin/pull/308) ([sushanthakumar](https://github.com/sushanthakumar))
- Hpe3par: update SSL certificate verification method [\#307](https://github.com/sodafoundation/delfin/pull/307) ([jiangyutan](https://github.com/jiangyutan))
- Send clear event when snmp validation succeed [\#305](https://github.com/sodafoundation/delfin/pull/305) ([wisererik](https://github.com/wisererik))
- update next release version in setup.py [\#304](https://github.com/sodafoundation/delfin/pull/304) ([NajmudheenCT](https://github.com/NajmudheenCT))
- Adding Configurable VMAX expiration time [\#303](https://github.com/sodafoundation/delfin/pull/303) ([PravinRanjan10](https://github.com/PravinRanjan10))
- Hpe3par:separate the common parts of rest and SSH interfaces [\#302](https://github.com/sodafoundation/delfin/pull/302) ([jiangyutan](https://github.com/jiangyutan))
- Updated delfin changes [\#299](https://github.com/sodafoundation/delfin/pull/299) ([sushanthakumar](https://github.com/sushanthakumar))
- List and clear alert changes for unisphere alerts [\#298](https://github.com/sodafoundation/delfin/pull/298) ([sushanthakumar](https://github.com/sushanthakumar))
- Optimizing vmax driver exception related code. [\#297](https://github.com/sodafoundation/delfin/pull/297) ([PravinRanjan10](https://github.com/PravinRanjan10))
- Fetching Default SRP for volumes which are not associated with storage group [\#296](https://github.com/sodafoundation/delfin/pull/296) ([NajmudheenCT](https://github.com/NajmudheenCT))
- Hpe3par:modify traps;modify checkhealth's components [\#295](https://github.com/sodafoundation/delfin/pull/295) ([jiangyutan](https://github.com/jiangyutan))
- Add Secure backend driver and dynamic certificate reload [\#290](https://github.com/sodafoundation/delfin/pull/290) ([joseph-v](https://github.com/joseph-v))
- Remove debug infomation and Fix some grammar problems [\#289](https://github.com/sodafoundation/delfin/pull/289) ([jiangyutan](https://github.com/jiangyutan))
- Oceanstor driver return fix for clear alert [\#283](https://github.com/sodafoundation/delfin/pull/283) ([sushanthakumar](https://github.com/sushanthakumar))
- Handle invalid input while getting array details for VMAX driver [\#282](https://github.com/sodafoundation/delfin/pull/282) ([joseph-v](https://github.com/joseph-v))
- Adding name and firmware version for VMAX [\#277](https://github.com/sodafoundation/delfin/pull/277) ([NajmudheenCT](https://github.com/NajmudheenCT))
- Fix oceanstor driver issue [\#276](https://github.com/sodafoundation/delfin/pull/276) ([wisererik](https://github.com/wisererik))
- hpe-3par driver support [\#274](https://github.com/sodafoundation/delfin/pull/274) ([jiangyutan](https://github.com/jiangyutan))

## [v0.2.0](https://github.com/sodafoundation/delfin/tree/v0.2.0) (2020-08-11)

[Full Changelog](https://github.com/sodafoundation/delfin/compare/v0.1.0...v0.2.0)

**Implemented enhancements:**

- Remove example code because it will not be used [\#86](https://github.com/sodafoundation/delfin/issues/86)

**Closed issues:**
- Need to support SSH connection between delfin and devices. [\#245](https://github.com/sodafoundation/delfin/issues/245)
- \[Alert Manager\] Alert model filling for Huawei OceanStor [\#195](https://github.com/sodafoundation/delfin/issues/195)

**Merged pull requests:**

- Update event type for alert model [\#273](https://github.com/sodafoundation/delfin/pull/273) ([wisererik](https://github.com/wisererik))
- Custom mib path enhancement [\#271](https://github.com/sodafoundation/delfin/pull/271) ([sushanthakumar](https://github.com/sushanthakumar))
- Alert source update with snmp validation [\#270](https://github.com/sodafoundation/delfin/pull/270) ([sushanthakumar](https://github.com/sushanthakumar))
- Update VMax driver to remove PyU4V lib [\#268](https://github.com/sodafoundation/delfin/pull/268) ([joseph-v](https://github.com/joseph-v))
- Adding raw\_capacity and subscribed capacity in VMAX driver [\#267](https://github.com/sodafoundation/delfin/pull/267) ([NajmudheenCT](https://github.com/NajmudheenCT))
- Add configuration for exporter framework [\#266](https://github.com/sodafoundation/delfin/pull/266) ([wisererik](https://github.com/wisererik))
- Clear alert support [\#265](https://github.com/sodafoundation/delfin/pull/265) ([sushanthakumar](https://github.com/sushanthakumar))
- Alert model refine changes [\#264](https://github.com/sodafoundation/delfin/pull/264) ([sushanthakumar](https://github.com/sushanthakumar))
- Add raw capacity to database model [\#263](https://github.com/sodafoundation/delfin/pull/263) ([ThisIsClark](https://github.com/ThisIsClark))
- Modify the constant type of sync status [\#255](https://github.com/sodafoundation/delfin/pull/255) ([ThisIsClark](https://github.com/ThisIsClark))
- swagger correction in mutiple APIS [\#253](https://github.com/sodafoundation/delfin/pull/253) ([NajmudheenCT](https://github.com/NajmudheenCT))
- Update access\_info model to support both REST and SSH. [\#246](https://github.com/sodafoundation/delfin/pull/246) ([sfzeng](https://github.com/sfzeng))

\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*

================================================
FILE: CHANGELOG/CHANGELOG-v1.1.0.md
================================================
# Changelog

## [Unreleased](https://github.com/sodafoundation/delfin/tree/HEAD)

[Full Changelog](https://github.com/sodafoundation/delfin/compare/v1.1.0-rc2...HEAD)

**Closed issues:**

- Module file is missing in IBM driver folder [\#455](https://github.com/sodafoundation/delfin/issues/455)
- Open API Spec \(Swagger file\) do not display disk api correctly [\#453](https://github.com/sodafoundation/delfin/issues/453)

**Merged pull requests:**

- Add python module file for ibm drivers [\#456](https://github.com/sodafoundation/delfin/pull/456) ([joseph-v](https://github.com/joseph-v))
- Fix api spec for format error in disk api [\#454](https://github.com/sodafoundation/delfin/pull/454) ([joseph-v](https://github.com/joseph-v))

## [v1.1.0-rc2](https://github.com/sodafoundation/delfin/tree/v1.1.0-rc2) (2021-01-05)

[Full Changelog](https://github.com/sodafoundation/delfin/compare/v1.1.0-rc1...v1.1.0-rc2)

**Merged pull requests:**

- Performance collection unregistration during storage deletion [\#451](https://github.com/sodafoundation/delfin/pull/451) ([sushanthakumar](https://github.com/sushanthakumar))
- Add EMC Unity driver [\#442](https://github.com/sodafoundation/delfin/pull/442) ([sushanthakumar](https://github.com/sushanthakumar))
- delfin 0.8.0 to master [\#439](https://github.com/sodafoundation/delfin/pull/439) ([PravinRanjan10](https://github.com/PravinRanjan10))
- Seperation of task service class [\#435](https://github.com/sodafoundation/delfin/pull/435) ([sushanthakumar](https://github.com/sushanthakumar))

## [v1.1.0-rc1](https://github.com/sodafoundation/delfin/tree/v1.1.0-rc1) (2020-12-24)

[Full Changelog](https://github.com/sodafoundation/delfin/compare/v1.0.0...v1.1.0-rc1)

**Closed issues:**

- Add python-dev instruction for delfin installation [\#418](https://github.com/sodafoundation/delfin/issues/418)
- Provide additional labels, storage name & storage serial number, in metrics collection data sent to Prometheus exporter [\#417](https://github.com/sodafoundation/delfin/issues/417)
- API update \(swagger\) for resource model improvement [\#405](https://github.com/sodafoundation/delfin/issues/405)
- Resource management improvement implementation [\#404](https://github.com/sodafoundation/delfin/issues/404)
- oslo\_service.wsgi.ConfigNotFound: Could not find config at api-paste.ini [\#390](https://github.com/sodafoundation/delfin/issues/390)
- Code improvements based on 0.8.0 version [\#356](https://github.com/sodafoundation/delfin/issues/356)
- Delfin installation doesn't work [\#340](https://github.com/sodafoundation/delfin/issues/340)

**Merged pull requests:**

- Adding Prometheus alert manager exporter [\#437](https://github.com/sodafoundation/delfin/pull/437) ([NajmudheenCT](https://github.com/NajmudheenCT))
- Update installer for the changed exporter path [\#436](https://github.com/sodafoundation/delfin/pull/436) ([joseph-v](https://github.com/joseph-v))
- Delfin exporter configurations and modified exporter selction mechanism [\#433](https://github.com/sodafoundation/delfin/pull/433) ([NajmudheenCT](https://github.com/NajmudheenCT))
- Update oceanstor for Controller, Port and Disk resource support [\#426](https://github.com/sodafoundation/delfin/pull/426) ([joseph-v](https://github.com/joseph-v))
- Adding more labels to array leval metrics [\#422](https://github.com/sodafoundation/delfin/pull/422) ([NajmudheenCT](https://github.com/NajmudheenCT))
- Add python3-dev package instruction [\#419](https://github.com/sodafoundation/delfin/pull/419) ([Anmolbansal1](https://github.com/Anmolbansal1))
- Update Delfin APIs for new resources controller, port & disk [\#415](https://github.com/sodafoundation/delfin/pull/415) ([joseph-v](https://github.com/joseph-v))
- Add Port resource to Delfin [\#408](https://github.com/sodafoundation/delfin/pull/408) ([joseph-v](https://github.com/joseph-v))
- Add Disk resource to Delfin [\#407](https://github.com/sodafoundation/delfin/pull/407) ([joseph-v](https://github.com/joseph-v))
- Delfin resource support enhancement for 'controller' [\#403](https://github.com/sodafoundation/delfin/pull/403) ([joseph-v](https://github.com/joseph-v))

\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*

================================================
FILE: Dockerfile
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM ubuntu:18.04

MAINTAINER soda team

RUN apt-get update -y && \
    apt-get install -y python3-pip && \
    apt-get install -y sqlite3 && \
    apt-get install -y libffi-dev && \
    pip3 install --upgrade pip

ADD . /delfin
WORKDIR /delfin

RUN pip3 install -r requirements.txt && \
    python3 setup.py install

ENTRYPOINT ["/delfin/script/start.sh"]
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

================================================
FILE: README.md
================================================
# delfin : SODA Infrastructure Manager Project

[![Build Status](https://travis-ci.com/sodafoundation/delfin.svg?branch=master)](https://travis-ci.com/sodafoundation/delfin)
[![codecov.io](https://codecov.io/github/sodafoundation/delfin/coverage.svg?branch=master)](https://codecov.io/github/sodafoundation/delfin?branch=master)
[![Releases](https://img.shields.io/github/release/sodafoundation/delfin/all.svg?style=flat-square)](https://github.com/sodafoundation/delfin/releases)
[![LICENSE](https://img.shields.io/github/license/sodafoundation/delfin.svg?style=flat-square)](https://github.com/sodafoundation/delfin/blob/master/LICENSE)

## Introduction

delfin (Dolphin in Spanish!), the SODA Infrastructure Manager project, is an open source project that provides unified, intelligent and scalable resource management, alerting and performance monitoring. It covers resource management of all the storage backends & other infrastructures under a SODA deployment. It also provides alert management and metric data (performance/health) for monitoring and further analysis.

It provides a scalable framework to which more and more backends, as well as client exporters, can be added. This enables adding more storage and infrastructure backends and supporting different management clients for monitoring and health prediction.

It provides unified APIs to access, export and connect with clients, as well as a set of interfaces for adding drivers.

This is one of the SODA Core Projects and is maintained by the SODA Foundation directly.
## Documentation

[https://docs.sodafoundation.io](https://docs.sodafoundation.io/)

## Quick Start - To Use/Experience

[https://docs.sodafoundation.io/guides/user-guides/delfin](https://docs.sodafoundation.io/guides/user-guides/delfin/)

## Quick Start - To Develop

[https://docs.sodafoundation.io/guides/developer-guides/delfin](https://docs.sodafoundation.io/guides/developer-guides/delfin/)

## Demo videos - To get to know the capabilities better

[https://www.youtube.com/watch?v=WtlxF7SHID4](https://www.youtube.com/watch?v=WtlxF7SHID4)

## Latest Releases

[https://github.com/sodafoundation/delfin/releases](https://github.com/sodafoundation/delfin/releases)

## Support and Issues

[https://github.com/sodafoundation/delfin/issues](https://github.com/sodafoundation/delfin/issues)

## Project Community

[https://sodafoundation.io/slack/](https://sodafoundation.io/slack/)

## How to contribute to this project?

Join [https://sodafoundation.io/slack/](https://sodafoundation.io/slack/) and share your interest in the ‘general’ channel

Checkout [https://github.com/sodafoundation/delfin/issues](https://github.com/sodafoundation/delfin/issues) labelled with ‘good first issue’ or ‘help needed’ or ‘help wanted’ or ‘StartMyContribution’ or ‘SMC’

## Project Roadmap

We want to build a unified intelligent and scalable infrastructure management framework for resource management (config, add, remove, update), alert management and performance metrics management.

[https://docs.sodafoundation.io](https://docs.sodafoundation.io/)

## Join SODA Foundation

Website : [https://sodafoundation.io](https://sodafoundation.io/)

Slack : [https://sodafoundation.io/slack/](https://sodafoundation.io/slack/)

Twitter : [@sodafoundation](https://twitter.com/sodafoundation)

Mailinglist : [https://lists.sodafoundation.io](https://lists.sodafoundation.io/)

================================================
FILE: codecov.yml
================================================
comment:
  layout: "header, diff, tree"

coverage:
  range: "70...100"
  precision: 2
  round: down
  status:
    project:
      default:
        target: 70%
    patch: off

================================================
FILE: delfin/__init__.py
================================================

================================================
FILE: delfin/alert_manager/__init__.py
================================================
================================================
FILE: delfin/alert_manager/alert_processor.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
import threading

from oslo_log import log

from delfin import context
from delfin import coordination
from delfin import db
from delfin import exception
from delfin.common import alert_util
from delfin.drivers import api as driver_manager
from delfin.exporter import base_exporter
from delfin.task_manager import rpcapi

LOG = log.getLogger(__name__)


class AlertProcessor(object):
    """Alert model translation and export functions"""

    def __init__(self):
        self.driver_manager = driver_manager.API()
        self.exporter_manager = base_exporter.AlertExporterManager()
        self.task_rpcapi = rpcapi.TaskAPI()

    def process_alert_info(self, alert):
        """Fills alert model using driver manager interface."""
        ctxt = context.get_admin_context()
        storage = db.storage_get(ctxt, alert['storage_id'])
        alert_model = {}

        try:
            alert_model = self.driver_manager.parse_alert(
                ctxt, alert['storage_id'], alert)
            # Fill storage specific info
            if alert_model:
                storage = self.get_storage_from_parsed_alert(
                    ctxt, storage, alert_model)
                alert_util.fill_storage_attributes(alert_model, storage)
        except exception.IncompleteTrapInformation as e:
            LOG.warning(e)
            threading.Thread(target=self.sync_storage_alert,
                             args=(ctxt, alert['storage_id'])).start()
        except exception.AlertSourceNotFound:
            LOG.info("Could not identify alert source from parsed alert. "
                     "Skipping the dispatch of alert")
            return
        except Exception as e:
            LOG.error(e)
            raise exception.InvalidResults(
                "Failed to fill the alert model from driver.")

        # Export to base exporter which handles dispatch for all exporters
        if alert_model:
            LOG.info("Dispatching one SNMP Trap to {} with sn {}".format(
                alert_model['storage_id'], alert_model['serial_number']))
            self.exporter_manager.dispatch(ctxt, [alert_model])

    def get_storage_from_parsed_alert(self, ctxt, storage, alert_model):
        # If parse_alert sets 'serial_number' or 'storage_name' in the
        # alert_model, we need to get corresponding storage details
        # from the db and fill that in alert_model
        storage_sn = alert_model.get('serial_number')
        storage_name = alert_model.get('storage_name')
        filters = {
            "vendor": storage['vendor'],
            "model": storage['model'],
        }
        try:
            if storage_sn and storage_sn != storage['serial_number']:
                filters['serial_number'] = storage_sn
            elif storage_name and storage_name != storage['name']:
                filters['name'] = storage_name
            else:
                return storage

            storage_list = db.storage_get_all(ctxt, filters=filters)
            if not storage_list:
                msg = "Failed to get destination storage for SNMP Trap. " \
                      "Storage with serial number {} or storage name {} " \
                      "not found in DB".format(storage_sn, storage_name)
                raise exception.AlertSourceNotFound(msg)
            db.alert_source_get(ctxt, storage_list[0]['id'])
            storage = storage_list[0]
        except exception.AlertSourceNotFound:
            LOG.info("Storage with serial number {} or name {} "
                     "is not registered for receiving "
                     "SNMP Trap".format(storage_sn, storage_name))
            raise
        return storage

    @coordination.synchronized('sync-trap-{storage_id}', blocking=False)
    def sync_storage_alert(self, context, storage_id):
        time.sleep(10)
        self.task_rpcapi.sync_storage_alerts(context, storage_id, None)
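The flow through this class: the trap receiver hands each decoded trap, tagged with the `storage_id` of the registered alert source, to `process_alert_info`, which parses it through the backend driver and dispatches the filled alert model to every configured exporter. A hedged usage sketch — the payload keys besides `storage_id` are placeholders, since the real varbind keys depend on the backend driver's `parse_alert()`:

```python
# Illustrative sketch only; field values are hypothetical.
from delfin.alert_manager.alert_processor import AlertProcessor

processor = AlertProcessor()

# A trap as the receiver might hand it over: raw varbinds plus the
# storage_id of the registered alert source that emitted it.
trap = {
    'storage_id': 'a1b2c3d4-0000-0000-0000-000000000000',  # placeholder id
    'transport_address': '192.168.0.10',                   # placeholder
    # ... vendor-specific OID/varbind pairs ...
}

# parse_alert() fills the alert model; the exporter manager then fans the
# model out to all configured alert exporters.
processor.process_alert_info(trap)
```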
================================================
FILE: delfin/alert_manager/constants.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# SNMP dispatcher job id (static identifier)
SNMP_DISPATCHER_JOB_ID = 1

# Valid SNMP versions.
SNMP_V1_INT = 1
SNMP_V2_INT = 2
SNMP_V3_INT = 3
VALID_SNMP_VERSIONS = {"snmpv1": SNMP_V1_INT,
                       "snmpv2c": SNMP_V2_INT,
                       "snmpv3": SNMP_V3_INT}

# Default limitation for batch query.
DEFAULT_LIMIT = 1000

================================================
FILE: delfin/alert_manager/rpcapi.py
================================================
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Client side of the alert manager RPC API.
"""

import oslo_messaging as messaging
from oslo_config import cfg

from delfin import rpc

CONF = cfg.CONF


class AlertAPI(object):
    """Client side of the alert manager rpc API.

    API version history:

        1.0 - Initial version.
    """

    RPC_API_VERSION = '1.0'

    def __init__(self):
        super(AlertAPI, self).__init__()
        target = messaging.Target(topic=CONF.delfin_alert_topic,
                                  version=self.RPC_API_VERSION)
        self.client = rpc.get_client(target,
                                     version_cap=self.RPC_API_VERSION)

    def sync_snmp_config(self, ctxt, snmp_config_to_del, snmp_config_to_add):
        call_context = self.client.prepare(version='1.0', fanout=True)
        return call_context.cast(ctxt, 'sync_snmp_config',
                                 snmp_config_to_del=snmp_config_to_del,
                                 snmp_config_to_add=snmp_config_to_add)

    def check_snmp_config(self, ctxt, snmp_config):
        call_context = self.client.prepare(version='1.0')
        return call_context.cast(ctxt, 'check_snmp_config',
                                 snmp_config=snmp_config)
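Note the messaging semantics here: `sync_snmp_config` prepares the client with `fanout=True`, so the broadcast reaches every running alert-manager instance, while `check_snmp_config` is consumed by a single service; both use `cast` (fire-and-forget), not `call`, so neither blocks on a reply. A hedged caller-side sketch — the `snmp_config` keys shown are assumptions mirroring the alert-source fields visible in `snmp_validator.py` below:

```python
# Illustrative caller sketch; config keys are assumed, not authoritative.
from delfin import context
from delfin.alert_manager import rpcapi

ctxt = context.get_admin_context()
alert_api = rpcapi.AlertAPI()

new_config = {
    'storage_id': 'a1b2c3d4-...',    # placeholder
    'host': '192.168.0.10',          # placeholder
    'version': 'snmpv2c',
    'community_string': '<encrypted>',
}

# Broadcast to all alert manager services (fanout cast, no return value).
alert_api.sync_snmp_config(ctxt, snmp_config_to_del=None,
                           snmp_config_to_add=new_config)

# Ask one alert manager service to validate connectivity asynchronously.
alert_api.check_snmp_config(ctxt, snmp_config=new_config)
```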

================================================
FILE: delfin/alert_manager/snmp_validator.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import binascii
import copy

import six
from oslo_config import cfg
from oslo_log import log
from oslo_utils import encodeutils
from pyasn1.type.univ import OctetString
from pysnmp.entity.rfc3413.oneliner import cmdgen

from delfin import cryptor
from delfin import db
from delfin import exception
from delfin import utils
from delfin.common import constants
from delfin.exporter import base_exporter

CONF = cfg.CONF
LOG = log.getLogger(__name__)


class SNMPValidator(object):
    def __init__(self):
        self.exporter = base_exporter.AlertExporterManager()
        self.snmp_error_flag = {}

    def validate(self, ctxt, alert_source):
        engine_id = alert_source.get('engine_id')
        try:
            hosts = alert_source['host'].split(',')
            temp_alert_source = copy.deepcopy(alert_source)
            # Count connection attempts so that SNMPConnectionFailed is
            # raised only after every configured host fails verification
            connection_times = 0
            for host in hosts:
                temp_alert_source['host'] = host
                try:
                    connection_times += 1
                    alert_source = \
                        self.validate_connectivity(ctxt, temp_alert_source)
                    break
                except Exception as e:
                    if connection_times == len(hosts):
                        raise e

            # If protocol is snmpv3, the snmp_validator will update
            # engine id if engine id is empty. Therefore, engine id
            # should be saved in database.
            if not engine_id and alert_source.get('engine_id'):
                alert_source_dict = {
                    'engine_id': alert_source.get('engine_id')}
                db.alert_source_update(ctxt,
                                       alert_source.get('storage_id'),
                                       alert_source_dict)
            self._handle_validation_result(ctxt,
                                           alert_source.get('storage_id'),
                                           constants.Category.RECOVERY)
        except exception.SNMPConnectionFailed:
            self._handle_validation_result(ctxt,
                                           alert_source.get('storage_id'))
        except Exception as e:
            msg = six.text_type(e)
            LOG.error("Failed to check snmp config. Reason: %s", msg)

    @staticmethod
    def validate_connectivity(ctxt, alert_source):
        # Fill optional parameters with default values if not set in input
        if not alert_source.get('port'):
            alert_source['port'] = constants.DEFAULT_SNMP_CONNECT_PORT

        if not alert_source.get('context_name'):
            alert_source['context_name'] = None

        if not alert_source.get('retry_num'):
            alert_source['retry_num'] = constants.DEFAULT_SNMP_RETRY_NUM

        if not alert_source.get('expiration'):
            alert_source['expiration'] = \
                constants.DEFAULT_SNMP_EXPIRATION_TIME

        if CONF.snmp_validation_enabled is False:
            return alert_source

        storage_id = alert_source.get('storage_id')
        access_info = db.access_info_get(ctxt, storage_id)
        access_info = dict(access_info)
        if access_info.get('model') not in constants.SNMP_SUPPORTED_MODELS:
            return alert_source

        cmd_gen = cmdgen.CommandGenerator()

        version = alert_source.get('version')

        # Connect to alert source through snmp get to check the configuration
        try:
            target = cmdgen.UdpTransportTarget((alert_source['host'],
                                                alert_source['port']),
                                               timeout=alert_source[
                                                   'expiration'],
                                               retries=alert_source[
                                                   'retry_num'])
            target.setLocalAddress((CONF.my_ip, 0))

            if version.lower() == 'snmpv3':
                # Register engine observer to get engineId,
                # Code reference from: http://snmplabs.com/pysnmp/
                observer_context = {}
                cmd_gen.snmpEngine.observer.registerObserver(
                    lambda e, p, v, c: c.update(
                        securityEngineId=v['securityEngineId']),
                    'rfc3412.prepareDataElements:internal',
                    cbCtx=observer_context
                )
                auth_key = None
                if alert_source['auth_key']:
                    auth_key = encodeutils.to_utf8(
                        cryptor.decode(alert_source['auth_key']))
                privacy_key = None
                if alert_source['privacy_key']:
                    privacy_key = encodeutils.to_utf8(
                        cryptor.decode(alert_source['privacy_key']))
                auth_protocol = None
                privacy_protocol = None
                if alert_source['auth_protocol']:
                    auth_protocol = constants.AUTH_PROTOCOL_MAP.get(
                        alert_source['auth_protocol'].lower())
                if alert_source['privacy_protocol']:
                    privacy_protocol = constants.PRIVACY_PROTOCOL_MAP.get(
                        alert_source['privacy_protocol'].lower())

                engine_id = alert_source.get('engine_id')
                if engine_id:
                    engine_id = OctetString.fromHexString(engine_id)
                error_indication, __, __, __ = cmd_gen.getCmd(
                    cmdgen.UsmUserData(alert_source['username'],
                                       authKey=auth_key,
                                       privKey=privacy_key,
                                       authProtocol=auth_protocol,
                                       privProtocol=privacy_protocol,
                                       securityEngineId=engine_id),
                    target,
                    constants.SNMP_QUERY_OID,
                )

                if 'securityEngineId' in observer_context:
                    engine_id = observer_context.get('securityEngineId')
                    alert_source['engine_id'] = binascii.hexlify(
                        engine_id.asOctets()).decode()
            else:
                community_string = encodeutils.to_utf8(
                    cryptor.decode(alert_source['community_string']))
                error_indication, __, __, __ = cmd_gen.getCmd(
                    cmdgen.CommunityData(
                        community_string,
                        contextName=alert_source['context_name']),
                    target,
                    constants.SNMP_QUERY_OID,
                )

            cmd_gen.snmpEngine.transportDispatcher.closeDispatcher()
            if not error_indication:
                return alert_source

            # Prepare exception with error_indication
            msg = six.text_type(error_indication)
        except Exception as e:
            msg = six.text_type(e)

        # Validation hit an error, so raise an exception
        LOG.error("Configuration validation failed for the alert source, "
                  "reason: %s." % msg)
        raise exception.SNMPConnectionFailed(msg)

    def _handle_validation_result(self, ctxt, storage_id,
                                  category=constants.Category.FAULT):
        try:
            storage = db.storage_get(ctxt, storage_id)
            serial_number = storage.get('serial_number')
            if category == constants.Category.FAULT:
                self.snmp_error_flag[serial_number] = True
                self._dispatch_snmp_validation_alert(ctxt, storage, category)
            elif self.snmp_error_flag.get(serial_number, True):
                self.snmp_error_flag[serial_number] = False
                self._dispatch_snmp_validation_alert(ctxt, storage, category)
        except Exception as e:
            msg = six.text_type(e)
            LOG.error("Exception occurred when handling validation "
                      "error: %s." % msg)

    def _dispatch_snmp_validation_alert(self, ctxt, storage, category):
        alert = {
            'storage_id': storage['id'],
            'storage_name': storage['name'],
            'vendor': storage['vendor'],
            'model': storage['model'],
            'serial_number': storage['serial_number'],
            'alert_id': constants.SNMP_CONNECTION_FAILED_ALERT_ID,
            'sequence_number': 0,
            'alert_name': 'SNMP connect failed',
            'category': category,
            'severity': constants.Severity.MAJOR,
            'type': constants.EventType.COMMUNICATIONS_ALARM,
            'location': 'NetworkEntity=%s' % storage['name'],
            'description': "SNMP connection to the storage failed. "
                           "SNMP traps from storage will not be received.",
            'recovery_advice': "1. The network connection is abnormal. "
                               "2. SNMP authentication parameters "
                               "are invalid.",
            'occur_time': utils.utcnow_ms(),
        }
        self.exporter.dispatch(ctxt, alert)
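
A small self-contained sketch (pyasn1 required; the engine id value below is
made up) of the hex round trip validate_connectivity relies on when it
persists the SNMPv3 engine id discovered by the observer callback:

import binascii

from pyasn1.type.univ import OctetString

engine_id_hex = '80001f8880e9bd0b5d0f4c5d00000000'  # illustrative value
octets = OctetString.fromHexString(engine_id_hex)
# Same conversion used to write observer_context's securityEngineId back
# into alert_source['engine_id'] above:
assert binascii.hexlify(octets.asOctets()).decode() == engine_id_hex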

================================================
FILE: delfin/alert_manager/trap_receiver.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six
from oslo_log import log
from oslo_service import periodic_task
from oslo_utils import encodeutils
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.entity import engine, config
from pysnmp.entity.rfc3413 import ntfrcv
from pysnmp.proto.api import v2c
from pysnmp.smi import builder, view
from retrying import retry

from delfin import context, cryptor
from delfin import db
from delfin import exception
from delfin import manager
from delfin.alert_manager import alert_processor
from delfin.alert_manager import constants
from delfin.alert_manager import rpcapi
from delfin.alert_manager import snmp_validator
from delfin.common import constants as common_constants
from delfin.db import api as db_api
from delfin.i18n import _

LOG = log.getLogger(__name__)


class TrapReceiver(manager.Manager):
    """Trap listening and processing functions"""

    RPC_API_VERSION = '1.0'

    def __init__(self, service_name=None, *args, **kwargs):
        self.mib_view_controller = kwargs.get('mib_view_controller')
        self.snmp_engine = kwargs.get('snmp_engine')
        self.trap_receiver_address = kwargs.get('trap_receiver_address')
        self.trap_receiver_port = kwargs.get('trap_receiver_port')
        self.alert_processor = alert_processor.AlertProcessor()
        self.snmp_validator = snmp_validator.SNMPValidator()
        self.alert_rpc_api = rpcapi.AlertAPI()
        super(TrapReceiver, self).__init__(host=kwargs.get('host'))

    def sync_snmp_config(self, ctxt, snmp_config_to_del=None,
                         snmp_config_to_add=None):
        if snmp_config_to_del:
            self._delete_snmp_config(ctxt, snmp_config_to_del)
        if snmp_config_to_add:
            self.snmp_validator.validate(ctxt, snmp_config_to_add)
            self._add_snmp_config(ctxt, snmp_config_to_add)

    def _add_snmp_config(self, ctxt, new_config):
        storage_id = new_config.get("storage_id")
        LOG.info("Start to add snmp trap config for storage: %s",
                 storage_id)
        try:
            version_int = self._get_snmp_version_int(
                ctxt, new_config.get("version"))
            if version_int == constants.SNMP_V2_INT or \
                    version_int == constants.SNMP_V1_INT:
                community_string = cryptor.decode(
                    new_config.get("community_string"))
                community_string = encodeutils.to_utf8(community_string)
                community_index = self._get_community_index(storage_id)
                config.addV1System(self.snmp_engine, community_index,
                                   community_string,
                                   contextName=community_string)
            else:
                username = new_config.get("username")
                engine_id = new_config.get("engine_id")
                if engine_id:
                    engine_id = v2c.OctetString(hexValue=engine_id)
                auth_key = new_config.get("auth_key")
                auth_protocol = new_config.get("auth_protocol")
                privacy_key = new_config.get("privacy_key")
                privacy_protocol = new_config.get("privacy_protocol")
                if auth_key:
                    auth_key = encodeutils.to_utf8(cryptor.decode(auth_key))
                if privacy_key:
                    privacy_key = encodeutils.to_utf8(
                        cryptor.decode(privacy_key))
                config.addV3User(
                    self.snmp_engine,
                    userName=username,
                    authKey=auth_key,
                    privKey=privacy_key,
                    authProtocol=self._get_usm_auth_protocol(
                        ctxt, auth_protocol),
                    privProtocol=self._get_usm_priv_protocol(
                        ctxt, privacy_protocol),
                    securityEngineId=engine_id)
            LOG.info("Add snmp trap config for storage: %s successfully.",
                     storage_id)
        except Exception as e:
            msg = six.text_type(e)
            LOG.error("Failed to add snmp trap config for storage: %s. "
                      "Reason: %s", storage_id, msg)
            raise e

    def _delete_snmp_config(self, ctxt, snmp_config):
        LOG.info("Start to remove snmp trap config.")
        version_int = self._get_snmp_version_int(
            ctxt, snmp_config.get("version"))
        if version_int == constants.SNMP_V3_INT:
            username = snmp_config.get('username')
            engine_id = snmp_config.get('engine_id')
            if engine_id:
                engine_id = v2c.OctetString(hexValue=engine_id)
            try:
                config.delV3User(self.snmp_engine, userName=username,
                                 securityEngineId=engine_id)
            except Exception as e:
                msg = six.text_type(e)
                LOG.warning("Snmp trap configuration to be "
                            "deleted could not be found. Reason: %s", msg)
        else:
            storage_id = snmp_config.get('storage_id')
            community_index = self._get_community_index(storage_id)
            config.delV1System(self.snmp_engine, community_index)

    def _get_community_index(self, storage_id):
        return storage_id.replace('-', '')

    def _get_snmp_version_int(self, ctxt, version):
        _version = version.lower()
        version_int = constants.VALID_SNMP_VERSIONS.get(_version)
        if version_int is None:
            msg = "Invalid snmp version %s." % version
            raise exception.InvalidSNMPConfig(msg)
        return version_int

    def _get_usm_auth_protocol(self, ctxt, auth_protocol):
        if auth_protocol:
            usm_auth_protocol = common_constants.AUTH_PROTOCOL_MAP \
                .get(auth_protocol.lower())
            if usm_auth_protocol:
                return usm_auth_protocol
            else:
                msg = "Invalid auth_protocol %s." % auth_protocol
                raise exception.InvalidSNMPConfig(msg)
        else:
            return config.usmNoAuthProtocol

    def _get_usm_priv_protocol(self, ctxt, privacy_protocol):
        if privacy_protocol:
            usm_priv_protocol = common_constants.PRIVACY_PROTOCOL_MAP.get(
                privacy_protocol.lower())
            if usm_priv_protocol:
                return usm_priv_protocol
            else:
                msg = "Invalid privacy_protocol %s." % privacy_protocol
                raise exception.InvalidSNMPConfig(msg)
        return config.usmNoPrivProtocol

    def _mib_builder(self):
        """Loads given set of mib files from given path."""
        mib_builder = builder.MibBuilder()
        self.mib_view_controller = view.MibViewController(mib_builder)

    def _add_transport(self):
        """Configures the transport parameters for the snmp engine."""
        try:
            config.addTransport(
                self.snmp_engine,
                udp.domainName,
                udp.UdpTransport().openServerMode(
                    (self.trap_receiver_address,
                     int(self.trap_receiver_port)))
            )
        except Exception as e:
            LOG.error('Failed to add transport, error is %s'
                      % six.text_type(e))
            raise exception.DelfinException(message=six.text_type(e))

    @staticmethod
    def _get_alert_source_by_host(source_ip):
        """Gets alert source for given source ip address."""
        filters = {'host~': source_ip}
        ctxt = context.RequestContext()

        # Using the known filter and db exceptions are handled by api
        alert_sources = db.alert_source_get_all(ctxt, filters=filters)
        if not alert_sources:
            raise exception.AlertSourceNotFoundWithHost(source_ip)

        # This is to make sure a unique host is configured for each
        # alert source
        unique_alert_source = None
        if len(alert_sources) > 1:
            # Clear invalid alert_source
            for alert_source in alert_sources:
                try:
                    db.storage_get(ctxt, alert_source['storage_id'])
                except exception.StorageNotFound:
                    LOG.warning('Found redundant alert source for storage %s'
                                % alert_source['storage_id'])
                    try:
                        db.alert_source_delete(
                            ctxt, alert_source['storage_id'])
                    except Exception as e:
                        LOG.warning('Delete the invalid alert source failed, '
                                    'reason is %s' % six.text_type(e))
                else:
                    unique_alert_source = alert_source
        else:
            unique_alert_source = alert_sources[0]

        if unique_alert_source is None:
            msg = (_("Failed to get unique alert source with host %s.")
                   % source_ip)
            raise exception.InvalidResults(msg)

        return unique_alert_source
    def _cb_fun(self, state_reference, context_engine_id, context_name,
                var_binds, cb_ctx):
        """Callback function to process the incoming trap."""
        exec_context = self.snmp_engine.observer.getExecutionContext(
            'rfc3412.receiveMessage:request')
        LOG.debug("Get notification from: %s" %
                  "#".join([str(x) for x in
                            exec_context['transportAddress']]))
        alert = {}

        try:
            # transportAddress contains both ip and port, extract ip address
            source_ip = exec_context['transportAddress'][0]
            alert_source = self._get_alert_source_by_host(source_ip)

            # In case of non v3 versions, the community string is used to map
            # the trap. The pysnmp library filters out traps whose community
            # string is not configured at all. But if a community name x is
            # configured for storage1 and a trap arrives with x from
            # storage2, the library will still allow the trap. So for non v3
            # versions, we need to verify that the community name is
            # configured in the alert source db for the storage which is
            # sending traps.
            # context_name contains the incoming community string value
            if exec_context['securityModel'] != constants.SNMP_V3_INT \
                    and cryptor.decode(alert_source['community_string']) \
                    != str(context_name):
                msg = (_("Community string not matching with alert source %s, "
                         "dropping it.") % source_ip)
                raise exception.InvalidResults(msg)

            for oid, val in var_binds:
                # Fill raw oid and values
                oid_str = str(oid)
                alert[oid_str] = str(val)

            # Fill additional info to alert info
            alert['transport_address'] = source_ip
            alert['storage_id'] = alert_source['storage_id']
            filters = {'mgmt_ip': source_ip,
                       'storage_id': alert_source['storage_id']}
            ctxt = context.RequestContext()
            controllers = db.controller_get_all(ctxt, filters=filters)
            if controllers:
                alert['controller_name'] = controllers[0].get('name')

            # Handover to alert processor for model translation and export
            self.alert_processor.process_alert_info(alert)
        except exception.DelfinException as e:
            # Log and end the trap processing error flow
            err_msg = _("Failed to process alert report (%s).") % e.msg
            LOG.exception(err_msg)
        except Exception as e:
            err_msg = six.text_type(e)
            LOG.exception(err_msg)

    def _load_snmp_config(self):
        """Load snmp config from database when service start."""
        ctxt = context.get_admin_context()
        marker = None
        finished = False
        limit = constants.DEFAULT_LIMIT
        while not finished:
            alert_sources = db_api.alert_source_get_all(ctxt, marker=marker,
                                                        limit=limit)
            for alert_source in alert_sources:
                snmp_config = dict()
                snmp_config.update(alert_source)
                self._add_snmp_config(ctxt, snmp_config)
                marker = alert_source['storage_id']
            if len(alert_sources) < limit:
                finished = True

    @retry(stop_max_attempt_number=180, wait_random_min=4000,
           wait_random_max=6000)
    def start(self):
        """Starts the snmp trap receiver with necessary prerequisites."""
        snmp_engine = engine.SnmpEngine()
        self.snmp_engine = snmp_engine

        try:
            # Load all the mibs and do snmp config
            self._mib_builder()
            self._load_snmp_config()

            # Register callback for notification receiver
            ntfrcv.NotificationReceiver(snmp_engine, self._cb_fun)

            # Add transport info(ip, port) and start the listener
            self._add_transport()
            snmp_engine.transportDispatcher.jobStarted(
                constants.SNMP_DISPATCHER_JOB_ID)
        except Exception as e:
            LOG.error(e)
            raise ValueError("Failed to setup for trap listener.")

        try:
            LOG.info("Starting trap receiver.")
            snmp_engine.transportDispatcher.runDispatcher()
        except Exception:
            snmp_engine.transportDispatcher.closeDispatcher()
            raise ValueError("Failed to start trap listener.")

    def stop(self):
        """Brings down the snmp trap receiver."""
        # Go ahead with shutdown and ignore any errors happening during
        # the process, as it is shutdown
        if self.snmp_engine:
            self.snmp_engine.transportDispatcher.closeDispatcher()
        LOG.info("Trap receiver stopped.")

    @periodic_task.periodic_task(spacing=1800, run_immediately=True)
    def heart_beat_task_spawn(self, ctxt):
        """Periodical task to spawn snmp heart beat check."""
        LOG.info("Spawn the snmp heart beat check task.")
        alert_source_list = db.alert_source_get_all(ctxt)
        for alert_source in alert_source_list:
            self.alert_rpc_api.check_snmp_config(ctxt, alert_source)

    def check_snmp_config(self, ctxt, snmp_config):
        LOG.info("Received snmp config checking request for "
                 "storage: %s", snmp_config['storage_id'])
        self.snmp_validator.validate(ctxt, snmp_config)
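
For orientation, the alert dict that _cb_fun assembles and hands to
AlertProcessor.process_alert_info looks roughly like this; the OIDs and
values are hypothetical, only the key layout follows the code above.

alert = {
    # Raw varbinds keyed by OID string, values stringified:
    '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.2.3.4.5',
    '1.3.6.1.4.1.2.3.4.6': 'example symptom text',
    # Added by _cb_fun before the handover:
    'transport_address': '192.0.2.10',
    'storage_id': 'demo-storage-id',
    'controller_name': 'SPA',  # only when a matching controller is found
}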

================================================
FILE: delfin/api/__init__.py
================================================
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import paste.urlmap


def root_app_factory(loader, global_conf, **local_conf):
    return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)


================================================
FILE: delfin/api/api_utils.py
================================================
# Copyright 2020 The SODA Authors.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import six

from oslo_config import cfg
from oslo_log import log
from oslo_utils import strutils

from delfin.common import constants
from delfin import exception
from delfin.i18n import _

api_common_opts = [
    cfg.IntOpt('api_max_limit',
               default=1000,
               help='The maximum number of items that a collection '
                    'resource returns in a single response'),
]

CONF = cfg.CONF
CONF.register_opts(api_common_opts)
LOG = log.getLogger(__name__)


def remove_invalid_options(context, search_options,
                           allowed_search_options):
    """Remove search options that are not valid for API/context."""
    unknown_options = [opt for opt in search_options
                       if opt not in allowed_search_options]
    bad_options = ", ".join(unknown_options)
    LOG.debug("Removing options '%(bad_options)s' from query",
              {"bad_options": bad_options})
    for opt in unknown_options:
        del search_options[opt]


def validate_integer(value, name, min_value=None, max_value=None):
    """Make sure that value is a valid integer, potentially within range.

    :param value: the value of the integer
    :param name: the name of the integer
    :param min_value: the min_value of the integer
    :param max_value: the max_value of the integer
    :returns: integer
    """
    try:
        value = strutils.validate_integer(value, name, min_value, max_value)
        return value
    except ValueError as e:
        raise exception.InvalidInput(six.text_type(e))
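
An illustrative call, assuming delfin is importable; the values are made up.
'25' is coerced to an int, while a non-integer or a value outside the
[0, 1000] range raises exception.InvalidInput.

from delfin.api import api_utils

offset = api_utils.validate_integer('25', 'offset',
                                    min_value=0, max_value=1000)
assert offset == 25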

def get_pagination_params(params, max_limit=None):
    """Return marker, limit, offset tuple from request.

    :param params: `wsgi.Request`'s GET dictionary, possibly containing
                   'marker', 'limit', and 'offset' variables. 'marker' is the
                   id of the last element the client has seen, 'limit' is the
                   maximum number of items to return and 'offset' is the
                   number of items to skip from the marker or from the first
                   element. If 'limit' is not specified, or > max_limit, we
                   default to max_limit. Negative values for either offset or
                   limit will cause delfin.InvalidInput() exceptions to be
                   raised. If no offset is present we'll default to 0 and if
                   no marker is present we'll default to None.
    :param max_limit: Max value 'limit' return value can take
    :returns: Tuple (marker, limit, offset)
    """
    max_limit = max_limit or CONF.api_max_limit
    limit = _get_limit_param(params, max_limit)
    marker = _get_marker_param(params)
    offset = _get_offset_param(params)
    return marker, limit, offset


def _get_limit_param(params, max_limit=None):
    """Extract integer limit from request's dictionary or fail.

    Defaults to max_limit if 'limit' is not present; returns max_limit if
    the given 'limit' is greater than max_limit.
    """
    max_limit = max_limit or CONF.api_max_limit
    try:
        limit = int(params.pop('limit', max_limit))
    except ValueError:
        msg = _('limit param must be an integer')
        raise exception.InvalidInput(msg)

    if limit < 0:
        msg = _('limit param must be non-negative')
        raise exception.InvalidInput(msg)

    limit = min(limit, max_limit)

    return limit


def _get_marker_param(params):
    """Extract marker id from request's dictionary (defaults to None)."""
    return params.pop('marker', None)


def _get_offset_param(params):
    """Extract offset id from request's dictionary (defaults to 0) or fail."""
    offset = params.pop('offset', 0)
    return validate_integer(offset, 'offset', 0, constants.DB_MAX_INT)


def get_sort_params(params, default_key='created_at', default_dir='desc'):
    """Retrieves sort keys/directions parameters.

    Processes the parameters to create a list of sort keys and sort
    directions that correspond to either the 'sort' parameter or the
    'sort_key' and 'sort_dir' parameter values. The value of the 'sort'
    parameter is a comma-separated list of sort keys, each key is
    optionally appended with ':<sort_dir>'.

    The sort parameters are removed from the request parameters by this
    function.
:param params: query parameters in the request :param default_key: default sort key value, added to the list if no sort keys are supplied :param default_dir: default sort dir value, added to the list if the corresponding key does not have a direction specified :returns: list of sort keys, list of sort dirs """ sort_keys = [] sort_dirs = [] if 'sort' in params: for sort in params.pop('sort').strip().split(','): sort_key, _sep, sort_dir = sort.partition(':') if not sort_dir: sort_dir = default_dir sort_keys.append(sort_key.strip()) sort_dirs.append(sort_dir.strip()) else: sort_key = params.pop('sort_key', default_key) sort_dir = params.pop('sort_dir', default_dir) sort_keys.append(sort_key.strip()) sort_dirs.append(sort_dir.strip()) return sort_keys, sort_dirs ================================================ FILE: delfin/api/common/__init__.py ================================================ # Copyright 2020 The SODA Authors. # Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack API controllers. """ from oslo_log import log from oslo_service import wsgi as base_wsgi import routes from delfin.api.common import wsgi from delfin.i18n import _ LOG = log.getLogger(__name__) class APIMapper(routes.Mapper): def routematch(self, url=None, environ=None): if url == "": result = self._match("", environ) return result[0], result[1] return routes.Mapper.routematch(self, url, environ) def connect(self, *args, **kwargs): # NOTE(inhye): Default the format part of a route to only accept json # and xml so it doesn't eat all characters after a '.' # in the url. 
kwargs.setdefault('requirements', {}) if not kwargs['requirements'].get('format'): kwargs['requirements']['format'] = 'json|xml' return routes.Mapper.connect(self, *args, **kwargs) class ProjectMapper(APIMapper): def resource(self, member_name, collection_name, **kwargs): if 'parent_resource' not in kwargs: kwargs['path_prefix'] = '/' else: parent_resource = kwargs['parent_resource'] p_collection = parent_resource['collection_name'] p_member = parent_resource['member_name'] kwargs['path_prefix'] = '/%s/:%s_id' % (p_collection, p_member) routes.Mapper.resource(self, member_name, collection_name, **kwargs) class APIRouter(base_wsgi.Router): """Routes requests on the API to the appropriate controller and method.""" ExtensionManager = None # override in subclasses @classmethod def factory(cls, global_config, **local_config): """Simple paste factory, :class:`delfin.wsgi.Router` doesn't have.""" return cls() def __init__(self, ext_mgr=None): if ext_mgr is None: if self.ExtensionManager: # pylint: disable=not-callable ext_mgr = self.ExtensionManager() else: raise Exception(_("Must specify an ExtensionManager class")) mapper = ProjectMapper() self.resources = {} self._setup_routes(mapper) self._setup_ext_routes(mapper, ext_mgr) self._setup_extensions(ext_mgr) super(APIRouter, self).__init__(mapper) def _setup_ext_routes(self, mapper, ext_mgr): for resource in ext_mgr.get_resources(): LOG.debug('Extended resource: %s', resource.collection) wsgi_resource = wsgi.Resource(resource.controller) self.resources[resource.collection] = wsgi_resource kargs = dict( controller=wsgi_resource, collection=resource.collection_actions, member=resource.member_actions) if resource.parent: kargs['parent_resource'] = resource.parent mapper.resource(resource.collection, resource.collection, **kargs) if resource.custom_routes_fn: resource.custom_routes_fn(mapper, wsgi_resource) def _setup_extensions(self, ext_mgr): for extension in ext_mgr.get_controller_extensions(): ext_name = extension.extension.name collection = extension.collection controller = extension.controller if collection not in self.resources: LOG.warning('Extension %(ext_name)s: Cannot extend ' 'resource %(collection)s: No such resource', {'ext_name': ext_name, 'collection': collection}) continue LOG.debug('Extension %(ext_name)s extending resource: ' '%(collection)s', {'ext_name': ext_name, 'collection': collection}) resource = self.resources[collection] resource.register_actions(controller) resource.register_extensions(controller) def _setup_routes(self, mapper): raise NotImplementedError ================================================ FILE: delfin/api/common/wsgi.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import inspect from oslo_log import log from oslo_serialization import jsonutils import six import webob import webob.exc from delfin import exception from delfin.i18n import _ from delfin.wsgi import common as wsgi LOG = log.getLogger(__name__) SUPPORTED_CONTENT_TYPES = ( 'application/json', ) _MEDIA_TYPE_MAP = { 'application/json': 'json', } class Request(webob.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" def __init__(self, *args, **kwargs): super(Request, self).__init__(*args, **kwargs) self._resource_cache = {} def cache_resource(self, resource_to_cache, id_attribute='id', name=None): """Cache the given resource. Allow API methods to cache objects, such as results from a DB query, to be used by API extensions within the same API request. The resource_to_cache can be a list or an individual resource, but ultimately resources are cached individually using the given id_attribute. Different resources types might need to be cached during the same request, they can be cached using the name parameter. For example: Controller 1: request.cache_resource(db_volumes, 'volumes') request.cache_resource(db_volume_types, 'types') Controller 2: db_volumes = request.cached_resource('volumes') db_type_1 = request.cached_resource_by_id('1', 'types') If no name is given, a default name will be used for the resource. An instance of this class only lives for the lifetime of a single API request, so there's no need to implement full cache management. """ if not isinstance(resource_to_cache, list): resource_to_cache = [resource_to_cache] if not name: name = self.path cached_resources = self._resource_cache.setdefault(name, {}) for resource in resource_to_cache: cached_resources[resource[id_attribute]] = resource def cached_resource(self, name=None): """Get the cached resources cached under the given resource name. Allow an API extension to get previously stored objects within the same API request. Note that the object data will be slightly stale. :returns: a dict of id_attribute to the resource from the cached resources, an empty map if an empty collection was cached, or None if nothing has been cached yet under this name """ if not name: name = self.path if name not in self._resource_cache: # Nothing has been cached for this key yet return None return self._resource_cache[name] def cached_resource_by_id(self, resource_id, name=None): """Get a resource by ID cached under the given resource name. Allow an API extension to get a previously stored object within the same API request. This is basically a convenience method to lookup by ID on the dictionary of all cached resources. Note that the object data will be slightly stale. :returns: the cached resource or None if the item is not in the cache """ resources = self.cached_resource(name) if not resources: # Nothing has been cached yet for this key yet return None return resources.get(resource_id) def cache_db_items(self, key, items, item_key='id'): """Cache db items. Allow API methods to store objects from a DB query to be used by API extensions within the same API request. An instance of this class only lives for the lifetime of a single API request, so there's no need to implement full cache management. """ self.cache_resource(items, item_key, key) def get_db_items(self, key): """Get db item by key. Allow an API extension to get previously stored objects within the same API request. Note that the object data will be slightly stale. 
""" return self.cached_resource(key) def get_db_item(self, key, item_key): """Get db item by key and item key. Allow an API extension to get a previously stored object within the same API request. Note that the object data will be slightly stale. """ return self.get_db_items(key).get(item_key) def cache_db_share_types(self, share_types): self.cache_db_items('share_types', share_types, 'id') def cache_db_share_type(self, share_type): self.cache_db_items('share_types', [share_type], 'id') def get_db_share_types(self): return self.get_db_items('share_types') def get_db_share_type(self, share_type_id): return self.get_db_item('share_types', share_type_id) def best_match_content_type(self): """Determine the requested response content-type.""" if 'delfin.best_content_type' not in self.environ: # Calculate the best MIME type content_type = None # Check URL path suffix parts = self.path.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in SUPPORTED_CONTENT_TYPES: content_type = possible_type if not content_type: content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) self.environ['delfin.best_content_type'] = (content_type or 'application/json') return self.environ['delfin.best_content_type'] def get_content_type(self): """Determine content type of the request body. Does not do any body introspection, only checks header. """ if "Content-Type" not in self.headers: return None allowed_types = SUPPORTED_CONTENT_TYPES content_type = self.content_type if content_type not in allowed_types: raise exception.InvalidContentType(content_type) return content_type class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, six.text_type(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class TextDeserializer(ActionDispatcher): """Default request body deserialization.""" def deserialize(self, datastring, action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {} class JSONDeserializer(TextDeserializer): def _from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(msg) def default(self, datastring): return {'body': self._from_json(datastring)} class DictSerializer(ActionDispatcher): """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): """Default JSON request body serialization.""" def default(self, data): return six.b(jsonutils.dumps(data)) def serializers(**serializers): """Attaches serializers to a method. This decorator associates a dictionary of serializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): if not hasattr(func, 'wsgi_serializers'): func.wsgi_serializers = {} func.wsgi_serializers.update(serializers) return func return decorator def deserializers(**deserializers): """Attaches deserializers to a method. This decorator associates a dictionary of deserializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. 
""" def decorator(func): if not hasattr(func, 'wsgi_deserializers'): func.wsgi_deserializers = {} func.wsgi_deserializers.update(deserializers) return func return decorator def response(code): """Attaches response code to a method. This decorator associates a response code with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): func.wsgi_code = code return func return decorator class ResponseObject(object): """Bundles a response object with appropriate serializers. Object that app methods may return in order to bind alternate serializers with a response object to be serialized. Its use is optional. """ def __init__(self, obj, code=None, headers=None, **serializers): """Binds serializers with an object. Takes keyword arguments akin to the @serializer() decorator for specifying serializers. Serializers specified will be given preference over default serializers or method-specific serializers on return. """ self.obj = obj self.serializers = serializers self._default_code = 200 self._code = code self._headers = headers or {} self.serializer = None self.media_type = None def __getitem__(self, key): """Retrieves a header with the given name.""" return self._headers[key.lower()] def __setitem__(self, key, value): """Sets a header with the given name to the given value.""" self._headers[key.lower()] = value def __delitem__(self, key): """Deletes the header with the given name.""" del self._headers[key.lower()] def _bind_method_serializers(self, meth_serializers): """Binds method serializers with the response object. Binds the method serializers with the response object. Serializers specified to the constructor will take precedence over serializers specified to this method. :param meth_serializers: A dictionary with keys mapping to response types and values containing serializer objects. """ # We can't use update because that would be the wrong # precedence for mtype, serializer in meth_serializers.items(): self.serializers.setdefault(mtype, serializer) def get_serializer(self, content_type, default_serializers=None): """Returns the serializer for the wrapped object. Returns the serializer for the wrapped object subject to the indicated content type. If no serializer matching the content type is attached, an appropriate serializer drawn from the default serializers will be used. If no appropriate serializer is available, raises InvalidContentType. """ default_serializers = default_serializers or {} try: mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) if mtype in self.serializers: return mtype, self.serializers[mtype] else: return mtype, default_serializers[mtype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type) def preserialize(self, content_type, default_serializers=None): """Prepares the serializer that will be used to serialize. Determines the serializer that will be used and prepares an instance of it for later call. This allows the serializer to be accessed by extensions for, e.g., template extension. """ mtype, serializer = self.get_serializer(content_type, default_serializers) self.media_type = mtype self.serializer = serializer() def attach(self, **kwargs): """Attach slave templates to serializers.""" if self.media_type in kwargs: self.serializer.attach(kwargs[self.media_type]) def serialize(self, request, content_type, default_serializers=None): """Serializes the wrapped object. Utility method for serializing the wrapped object. Returns a webob.Response object. 
""" if self.serializer: serializer = self.serializer else: _mtype, _serializer = self.get_serializer(content_type, default_serializers) serializer = _serializer() response = webob.Response() response.status_int = self.code for hdr, value in self._headers.items(): response.headers[hdr] = six.text_type(value) response.headers['Content-Type'] = six.text_type(content_type) if self.obj is not None: response.body = serializer.serialize(self.obj) return response @property def code(self): """Retrieve the response status.""" return self._code or self._default_code @property def headers(self): """Retrieve the headers.""" return self._headers.copy() def action_peek_json(body): """Determine action to invoke.""" try: decoded = jsonutils.loads(body) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(msg) # Make sure there's exactly one key... if len(decoded) != 1: msg = _("too many body keys") raise exception.MalformedRequestBody(msg) # Return the action and the decoded body... return list(decoded.keys())[0] class ResourceExceptionHandler(object): """Context manager to handle Resource exceptions. Used when processing exceptions generated by API implementation methods (or their extensions). Converts most exceptions to Fault exceptions, with the appropriate logging. """ def __enter__(self): return None def __exit__(self, ex_type, ex_value, ex_traceback): if not ex_value: return True if isinstance(ex_value, exception.DelfinException): raise Fault(exception.ConvertedException(ex_value)) elif isinstance(ex_value, TypeError): exc_info = (ex_type, ex_value, ex_traceback) LOG.error('Exception handling resource: %s', ex_value, exc_info=exc_info) exc = exception.BadRequest() raise Fault(exception.ConvertedException(exc)) elif isinstance(ex_value, Fault): LOG.info("Fault thrown: %s", ex_value) raise ex_value elif isinstance(ex_value, webob.exc.HTTPException): LOG.info("HTTP exception thrown: %s", ex_value) raise Fault(ex_value) # We didn't handle the exception return False class Resource(wsgi.Application): """WSGI app that handles (de)serialization and controller dispatch. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon its controller. All controller action methods must accept a 'req' argument, which is the incoming wsgi.Request. If the operation is a PUT or POST, the controller method must also accept a 'body' argument (the deserialized request body). They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. Exceptions derived from webob.exc.HTTPException will be automatically wrapped in Fault() to provide API friendly error responses. """ support_api_request_version = True def __init__(self, controller, action_peek=None, **deserializers): """init method of Resource. 
:param controller: object that implement methods created by routes lib :param action_peek: dictionary of routines for peeking into an action request body to determine the desired action """ self.controller = controller default_deserializers = dict(json=JSONDeserializer) default_deserializers.update(deserializers) self.default_deserializers = default_deserializers self.default_serializers = dict(json=JSONDictSerializer) self.action_peek = dict(json=action_peek_json) self.action_peek.update(action_peek or {}) # Copy over the actions dictionary self.wsgi_actions = {} if controller: self.register_actions(controller) # Save a mapping of extensions self.wsgi_extensions = {} self.wsgi_action_extensions = {} def register_actions(self, controller): """Registers controller actions with this resource.""" actions = getattr(controller, 'wsgi_actions', {}) for key, method_name in actions.items(): self.wsgi_actions[key] = getattr(controller, method_name) def register_extensions(self, controller): """Registers controller extensions with this resource.""" extensions = getattr(controller, 'wsgi_extensions', []) for method_name, action_name in extensions: # Look up the extending method extension = getattr(controller, method_name) if action_name: # Extending an action... if action_name not in self.wsgi_action_extensions: self.wsgi_action_extensions[action_name] = [] self.wsgi_action_extensions[action_name].append(extension) else: # Extending a regular method if method_name not in self.wsgi_extensions: self.wsgi_extensions[method_name] = [] self.wsgi_extensions[method_name].append(extension) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" # NOTE(Vek): Check for get_action_args() override in the # controller if hasattr(self.controller, 'get_action_args'): return self.controller.get_action_args(request_environment) try: args = request_environment['wsgiorg.routing_args'][1].copy() except (KeyError, IndexError, AttributeError): return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args def get_body(self, request): try: content_type = request.get_content_type() except exception.InvalidContentType: LOG.debug("Unrecognized Content-Type provided in request") return None, '' if not content_type: LOG.debug("No Content-Type provided in request") return None, '' if len(request.body) <= 0: LOG.debug("Empty body provided in request") return None, '' return content_type, request.body def deserialize(self, meth, content_type, body): meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) try: mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) if mtype in meth_deserializers: deserializer = meth_deserializers[mtype] else: deserializer = self.default_deserializers[mtype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type) return deserializer().deserialize(body) def pre_process_extensions(self, extensions, request, action_args): # List of callables for post-processing extensions post = [] for ext in extensions: if inspect.isgeneratorfunction(ext): response = None # If it's a generator function, the part before the # yield is the preprocessing stage try: with ResourceExceptionHandler(): gen = ext(req=request, **action_args) response = next(gen) except Fault as ex: response = ex # We had a response... 
if response: return response, [] # No response, queue up generator for post-processing post.append(gen) else: # Regular functions only perform post-processing post.append(ext) # Run post-processing in the reverse order return None, reversed(post) def post_process_extensions(self, extensions, resp_obj, request, action_args): for ext in extensions: response = None if inspect.isgenerator(ext): # If it's a generator, run the second half of # processing try: with ResourceExceptionHandler(): response = ext.send(resp_obj) except StopIteration: # Normal exit of generator continue except Fault as ex: response = ex else: # Regular functions get post-processing... try: with ResourceExceptionHandler(): response = ext(req=request, resp_obj=resp_obj, **action_args) except Fault as ex: response = ex # We had a response... if response: return response return None @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" LOG.info("%(method)s %(url)s", {"method": request.method, "url": request.url}) # Identify the action, its arguments, and the requested # content type action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) content_type, body = self.get_body(request) accept = request.best_match_content_type() # NOTE(Vek): Splitting the function up this way allows for # auditing by external tools that wrap the existing # function. If we try to audit __call__(), we can # run into troubles due to the @webob.dec.wsgify() # decorator. return self._process_stack(request, action, action_args, content_type, body, accept) def _process_stack(self, request, action, action_args, content_type, body, accept): """Implement the processing stack.""" # Get the implementing method try: meth, extensions = self.get_method(request, action, content_type, body) except (AttributeError, TypeError): ex = exception.ConvertedException(exception.NotFound()) return Fault(ex) except KeyError as ex: ex = exception.ConvertedException( exception.NoSuchAction(ex.args[0])) return Fault(ex) except exception.MalformedRequestBody as ex: ex = exception.ConvertedException(ex) return Fault(ex) # Now, deserialize the request body... 
try: if content_type: contents = self.deserialize(meth, content_type, body) else: contents = {} except exception.InvalidContentType as ex: ex = exception.ConvertedException(ex) return Fault(ex) except exception.MalformedRequestBody as ex: ex = exception.ConvertedException(ex) return Fault(ex) # Update the action args action_args.update(contents) project_id = action_args.pop("project_id", None) context = request.environ.get('delfin.context') if (context and project_id and (project_id != context.project_id)): ex = exception.ConvertedException(exception.MalformedRequestUrl()) return Fault(ex) # Run pre-processing extensions response, post = self.pre_process_extensions(extensions, request, action_args) if not response: try: with ResourceExceptionHandler(): action_result = self.dispatch(meth, request, action_args) except Fault as ex: response = ex if not response: # No exceptions; convert action_result into a # ResponseObject resp_obj = None if type(action_result) is dict or action_result is None: resp_obj = ResponseObject(action_result) elif isinstance(action_result, ResponseObject): resp_obj = action_result else: response = action_result # Run post-processing extensions if resp_obj: _set_request_id_header(request, resp_obj) # Do a preserialize to set up the response object serializers = getattr(meth, 'wsgi_serializers', {}) resp_obj._bind_method_serializers(serializers) if hasattr(meth, 'wsgi_code'): resp_obj._default_code = meth.wsgi_code resp_obj.preserialize(accept, self.default_serializers) # Process post-processing extensions response = self.post_process_extensions(post, resp_obj, request, action_args) if resp_obj and not response: response = resp_obj.serialize(request, accept, self.default_serializers) try: msg_dict = dict(url=request.url, status=response.status_int) msg = _("%(url)s returned with HTTP %(status)s") % msg_dict except AttributeError as e: msg_dict = dict(url=request.url, e=e) msg = _("%(url)s returned a fault: %(e)s") % msg_dict LOG.info(msg) return response def get_method(self, request, action, content_type, body): """Look up the action-specific method and its extensions.""" # Look up the method try: if not self.controller: meth = getattr(self, action) else: meth = getattr(self.controller, action) except AttributeError: if (not self.wsgi_actions or action not in ['action', 'create', 'delete']): # Propagate the error raise else: return meth, self.wsgi_extensions.get(action, []) if action == 'action': # OK, it's an action; figure out which action... mtype = _MEDIA_TYPE_MAP.get(content_type) action_name = self.action_peek[mtype](body) LOG.debug("Action body: %s", body) else: action_name = action # Look up the action method return (self.wsgi_actions[action_name], self.wsgi_action_extensions.get(action_name, [])) def dispatch(self, method, request, action_args): """Dispatch a call to the action-specific method.""" return method(req=request, **action_args) def action(name): """Mark a function as an action. The given name will be taken as the action key in the body. This is also overloaded to allow extensions to provide non-extending definitions of create and delete operations. """ def decorator(func): func.wsgi_action = name return func return decorator def extends(*args, **kwargs): """Indicate a function extends an operation. 
Can be used as either:: @extends def index(...): pass or as:: @extends(action='resize') def _action_resize(...): pass """ def decorator(func): # Store enough information to find what we're extending func.wsgi_extends = (func.__name__, kwargs.get('action')) return func # If we have positional arguments, call the decorator if args: return decorator(*args) # OK, return the decorator instead return decorator class ControllerMetaclass(type): """Controller metaclass. This metaclass automates the task of assembling a dictionary mapping action keys to method names. """ def __new__(mcs, name, bases, cls_dict): """Adds the wsgi_actions dictionary to the class.""" # Find all actions actions = {} extensions = [] # start with wsgi actions from base classes for base in bases: actions.update(getattr(base, 'wsgi_actions', {})) for key, value in cls_dict.items(): if not callable(value): continue if getattr(value, 'wsgi_action', None): actions[value.wsgi_action] = key elif getattr(value, 'wsgi_extends', None): extensions.append(value.wsgi_extends) # Add the actions and extensions to the class dict cls_dict['wsgi_actions'] = actions cls_dict['wsgi_extensions'] = extensions return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, cls_dict) @six.add_metaclass(ControllerMetaclass) class Controller(object): """Default controller.""" _view_builder_class = None def __init__(self, view_builder=None): """Initialize controller with a view builder instance.""" if view_builder: self._view_builder = view_builder elif self._view_builder_class: # pylint: disable=not-callable self._view_builder = self._view_builder_class() else: self._view_builder = None @staticmethod def is_valid_body(body, entity_name): if not (body and entity_name in body): return False def is_dict(d): try: d.get(None) return True except AttributeError: return False if not is_dict(body[entity_name]): return False return True class Fault(webob.exc.HTTPException): """Wrap webob.exc.HTTPException to provide API friendly response.""" def __init__(self, exception): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception self.status_int = exception.status_int @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. status_code = self.wrapped_exc.status_int fault_data = { 'error_code': self.wrapped_exc.error_code, 'error_msg': self.wrapped_exc.explanation, 'error_args': self.wrapped_exc.error_args} LOG.info("Exception response code: %(code)s, reason: %(reason)s", {'code': status_code, 'reason': fault_data}) if status_code == 413: retry = self.wrapped_exc.headers['Retry-After'] fault_data['retryAfter'] = '%s' % retry content_type = req.best_match_content_type() serializer = { 'application/json': JSONDictSerializer(), }[content_type] self.wrapped_exc.body = serializer.serialize(fault_data) self.wrapped_exc.content_type = content_type _set_request_id_header(req, self.wrapped_exc.headers) return self.wrapped_exc def __str__(self): return self.wrapped_exc.__str__() def _set_request_id_header(req, headers): context = req.environ.get('delfin.context') if context: headers['x-compute-request-id'] = context.request_id ================================================ FILE: delfin/api/contrib/__init__.py ================================================ # Copyright 2011 Justin Santa Barbara # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Contrib contains extensions that are shipped with delfin. It can't be called 'extensions' because that causes namespacing problems. """ from oslo_config import cfg from oslo_log import log from delfin.api import extensions CONF = cfg.CONF LOG = log.getLogger(__name__) def standard_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__) def select_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__, CONF.delfin_api_ext_list) ================================================ FILE: delfin/api/extensions.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2011 OpenStack LLC. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from delfin.api.common import wsgi from delfin import exception CONF = cfg.CONF LOG = log.getLogger(__name__) class ExtensionDescriptor(object): """Base class that defines the contract for extensions. Note that you don't have to derive from this class to have a valid extension; it is purely a convenience. """ # The name of the extension, e.g., 'Fox In Socks' name = None # The alias for the extension, e.g., 'FOXNSOX' alias = None # Description comes from the docstring for the class # The timestamp when the extension was last updated, e.g., # '2011-01-22T13:25:27-06:00' updated = None def __init__(self, ext_mgr): """Register extension with the extension manager.""" ext_mgr.register(self) self.ext_mgr = ext_mgr def get_resources(self): """List of extensions.ResourceExtension extension objects. Resources define new nouns, and are accessible through URLs. """ resources = [] return resources def get_controller_extensions(self): """List of extensions.ControllerExtension extension objects. Controller extensions are used to extend existing controllers. 
""" controller_exts = [] return controller_exts class ExtensionsResource(wsgi.Resource): def __init__(self, extension_manager): self.extension_manager = extension_manager super(ExtensionsResource, self).__init__(None) def _translate(self, ext): ext_data = {} ext_data['name'] = ext.name ext_data['alias'] = ext.alias ext_data['description'] = ext.__doc__ ext_data['updated'] = ext.updated ext_data['links'] = [] # TODO(dprince): implement extension links return ext_data def index(self, req): extensions = [] for _alias, ext in self.extension_manager.extensions.items(): extensions.append(self._translate(ext)) return dict(extensions=extensions) def show(self, req, id): try: # NOTE(dprince): the extensions alias is used as the 'id' for show ext = self.extension_manager.extensions[id] except KeyError: raise exception.NotFound() return dict(extension=self._translate(ext)) def delete(self, req, id): raise exception.NotFound() def create(self, req): raise exception.NotFound() class ExtensionManager(object): """Load extensions from the configured extension path. See delfin/tests/api/extensions/foxinsocks/extension.py for an example extension implementation. """ def __init__(self): LOG.info('Initializing extension manager.') self.cls_list = CONF.delfin_api_extension self.extensions = {} self._load_extensions() def register(self, ext): # Do nothing if the extension doesn't check out if not self._check_extension(ext): return alias = ext.alias LOG.info('Loaded extension: %s', alias) if alias in self.extensions: raise exception.DuplicateExtension(alias) self.extensions[alias] = ext def get_resources(self): """Returns a list of ResourceExtension objects.""" resources = [] resources.append(ResourceExtension('extensions', ExtensionsResource(self))) for ext in self.extensions.values(): try: resources.extend(ext.get_resources()) except AttributeError: # NOTE(dprince): Extension aren't required to have resource # extensions pass return resources def get_controller_extensions(self): """Returns a list of ControllerExtension objects.""" controller_exts = [] for ext in self.extensions.values(): try: get_ext_method = ext.get_controller_extensions except AttributeError: # NOTE(Vek): Extensions aren't required to have # controller extensions continue controller_exts.extend(get_ext_method()) return controller_exts def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: LOG.debug('Ext name: %s', extension.name) LOG.debug('Ext alias: %s', extension.alias) LOG.debug('Ext description: %s', ' '.join(extension.__doc__.strip().split())) LOG.debug('Ext updated: %s', extension.updated) except AttributeError: LOG.exception("Exception loading extension.") return False return True def load_extension(self, ext_factory): """Execute an extension factory. Loads an extension. The 'ext_factory' is the name of a callable that will be imported and called with one argument--the extension manager. The factory callable is expected to call the register() method at least once. """ LOG.debug("Loading extension %s", ext_factory) # Load the factory factory = importutils.import_class(ext_factory) # Call it LOG.debug("Calling extension factory %s", ext_factory) factory(self) def _load_extensions(self): """Load extensions specified on the command line.""" extensions = list(self.cls_list) # NOTE(thingee): Backwards compat for the old extension loader path. # We can drop this post-grizzly in the H release. old_contrib_path = ('delfin.api.common.share.contrib.' 
'standard_extensions') new_contrib_path = 'delfin.api.contrib.standard_extensions' if old_contrib_path in extensions: LOG.warning('delfin_api_extension is set to deprecated path: ' '%s.', old_contrib_path) LOG.warning('Please set your flag or delfin.conf settings for ' 'delfin_api_extension to: %s.', new_contrib_path) extensions = [e.replace(old_contrib_path, new_contrib_path) for e in extensions] for ext_factory in extensions: try: self.load_extension(ext_factory) except Exception as exc: LOG.warning('Failed to load extension %(ext_factory)s: ' '%(exc)s.', {"ext_factory": ext_factory, "exc": exc}) class ControllerExtension(object): """Extend core controllers of delfin OpenStack API. Provide a way to extend existing delfin OpenStack API core controllers. """ def __init__(self, extension, collection, controller): self.extension = extension self.collection = collection self.controller = controller class ResourceExtension(object): """Add top level resources to the OpenStack API in delfin.""" def __init__(self, collection, controller, parent=None, collection_actions=None, member_actions=None, custom_routes_fn=None): if not collection_actions: collection_actions = {} if not member_actions: member_actions = {} self.collection = collection self.controller = controller self.parent = parent self.collection_actions = collection_actions self.member_actions = member_actions self.custom_routes_fn = custom_routes_fn def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): """Registers all standard API extensions.""" # Walk through all the modules in our directory... our_dir = path[0] for dirpath, dirnames, filenames in os.walk(our_dir): # Compute the relative package name from the dirpath relpath = os.path.relpath(dirpath, our_dir) if relpath == '.': relpkg = '' else: relpkg = '.%s' % '.'.join(relpath.split(os.sep)) # Now, consider each file in turn, only considering .py and .pyc files for fname in filenames: root, ext = os.path.splitext(fname) # Skip __init__ and anything that's not .py and .pyc if (ext not in ('.py', '.pyc')) or root == '__init__': continue # If .pyc and .py both exist, skip .pyc if ext == '.pyc' and ((root + '.py') in filenames): continue # Try loading it classname = "%s%s" % (root[0].upper(), root[1:]) classpath = ("%s%s.%s.%s" % (package, relpkg, root, classname)) if ext_list is not None and classname not in ext_list: logger.debug("Skipping extension: %s" % classpath) continue try: ext_mgr.load_extension(classpath) except Exception as exc: logger.warning('Failed to load extension %(classpath)s: ' '%(exc)s.', {"classpath": classpath, "exc": exc}) # Now, let's consider any subdirectories we may have... subdirs = [] for dname in dirnames: # Skip it if it does not have __init__.py if not os.path.exists(os.path.join(dirpath, dname, '__init__.py')): continue # If it has extension(), delegate... ext_name = ("%s%s.%s.extension" % (package, relpkg, dname)) try: ext = importutils.import_class(ext_name) except ImportError: # extension() doesn't exist on it, so we'll explore # the directory for ourselves subdirs.append(dname) else: try: ext(ext_mgr) except Exception as exc: logger.warning('Failed to load extension ' '%(ext_name)s: %(exc)s.', {"ext_name": ext_name, "exc": exc}) # Update the list of directories we'll explore... dirnames[:] = subdirs ================================================ FILE: delfin/api/middlewares.py ================================================ # Copyright 2020 The SODA Authors. 
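# ---------------------------------------------------------------------------
# Editor's example (hedged): the smallest extension that satisfies the
# contract checked by ExtensionManager._check_extension() above. The class
# name matters: load_standard_extensions() derives it from the file name, so
# a module ``foxinsocks.py`` must expose a class named ``Foxinsocks``. The
# sample under delfin/tests mentioned in the ExtensionManager docstring is
# similar; the body below is illustrative, not copied from it.
# ---------------------------------------------------------------------------
from delfin.api import extensions


class Foxinsocks(extensions.ExtensionDescriptor):
    """The Fox In Socks extension (illustrative only)."""

    name = 'Fox In Socks'
    alias = 'FOXNSOX'
    updated = '2020-01-01T00:00:00+00:00'

    def get_resources(self):
        # A real extension would return ResourceExtension objects here to
        # add new top-level URLs; an empty list is also acceptable.
        return []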
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import webob.dec from delfin import context from delfin.wsgi import common as wsgi class ContextWrapper(wsgi.Middleware): """Add 'delfin.context' to req.environ""" @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): req.environ['delfin.context'] = context.RequestContext() return self.application ================================================ FILE: delfin/api/schemas/__init__.py ================================================ ================================================ FILE: delfin/api/schemas/access_info.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin.api.validation import parameter_types update = { 'type': 'object', 'properties': { 'rest': { 'type': 'object', 'properties': { 'host': parameter_types.hostname_or_ip_address, 'port': parameter_types.tcp_udp_port, 'username': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'password': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, 'required': ['host', 'port', 'username', 'password'], 'additionalProperties': False }, 'ssh': { 'type': 'object', 'properties': { 'host': parameter_types.hostname_or_ip_address, 'port': parameter_types.tcp_udp_port, 'username': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'password': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'pub_key': {'type': 'string', 'minLength': 1, 'maxLength': 4096}, 'pub_key_type': parameter_types.host_key_type }, 'required': ['host', 'port', 'username'], 'additionalProperties': False }, 'cli': { 'type': 'object', 'properties': { 'host': parameter_types.hostname_or_ip_address, 'port': parameter_types.tcp_udp_port, 'username': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'password': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, 'required': ['host', 'username', 'password'], 'additionalProperties': False }, 'smis': { 'type': 'object', 'properties': { 'host': parameter_types.hostname_or_ip_address, 'port': parameter_types.tcp_udp_port, 'username': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'password': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'namespace': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, 'required': ['host', 'username', 'password'], 'additionalProperties': False }, 'extra_attributes': { 'type': 'object', 'patternProperties': { '^[a-zA-Z0-9-_:. 
]{1,255}$': { 'type': 'string', 'maxLength': 255 } } } }, 'anyOf': [ {'required': ['rest']}, {'required': ['ssh']}, {'required': ['cli']}, {'required': ['smis']} ], 'additionalProperties': False } ================================================ FILE: delfin/api/schemas/alert_source.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin.api.validation import parameter_types # engineId is in range (5-32) octet which is 10-64 hex characters # If it is odd length, 0 will be prefixed to last octet, so minimum length is 9 put = { 'type': 'object', 'properties': { 'host': parameter_types.hostname_or_ip_address, 'version': parameter_types.snmp_version, 'community_string': {'type': 'string', 'minLength': 1, 'maxLength': 32}, 'username': {'type': 'string', 'minLength': 1, 'maxLength': 32}, 'security_level': parameter_types.snmp_security_level, 'auth_key': {'type': 'string', 'minLength': 8, 'maxLength': 65535}, 'auth_protocol': parameter_types.snmp_auth_protocol, 'privacy_protocol': parameter_types.snmp_privacy_protocol, 'privacy_key': {'type': 'string', 'minLength': 8, 'maxLength': 65535}, 'engine_id': {'type': 'string', 'minLength': 9, 'maxLength': 64}, 'context_name': {'type': 'string', 'minLength': 0, 'maxLength': 32}, 'retry_num': {'type': 'integer'}, 'expiration': {'type': 'integer'}, 'port': parameter_types.tcp_udp_port }, 'required': ['host', 'version'], 'additionalProperties': False, } ================================================ FILE: delfin/api/schemas/alerts.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # begin_time and end_time are time in milliseconds post = { 'type': 'object', 'properties': { 'begin_time': {'type': 'integer'}, 'end_time': {'type': 'integer'} }, 'additionalProperties': False, } ================================================ FILE: delfin/api/schemas/storage_capabilities_schema.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
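# ---------------------------------------------------------------------------
# Editor's example (hedged): what the alert_source ``put`` schema above
# accepts. In the API this check is wired in through @validation.schema
# (see delfin/api/v1/alert_source.py below); plain jsonschema is used here
# only to illustrate the rules. The values are invented, and the exact
# accepted version spellings come from parameter_types.snmp_version.
# ---------------------------------------------------------------------------
import jsonschema

from delfin.api.schemas.alert_source import put

body = {
    'host': '10.0.0.5',
    'version': 'snmpv2c',        # assumed spelling; see snmp_version enum
    'community_string': 'public',
}
try:
    jsonschema.validate(body, put)
    print('body accepted')
except jsonschema.ValidationError as err:
    # Triggers if, e.g., 'version' is missing, misspelled, or an unknown
    # key is present ('additionalProperties' is False).
    print('rejected:', err.message)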
# See the License for the specific language governing permissions and # limitations under the License. from delfin.common.constants import ResourceType, StorageMetric, \ StoragePoolMetric, VolumeMetric, ControllerMetric, PortMetric, \ DiskMetric, FileSystemMetric STORAGE_CAPABILITIES_SCHEMA = { 'type': 'object', 'properties': { 'is_historic': {'type': 'boolean'}, 'performance_metric_retention_window': {'type': 'integer'}, 'resource_metrics': { 'type': 'object', 'properties': { ResourceType.STORAGE: { 'type': 'object', 'properties': { StorageMetric.THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StorageMetric.THROUGHPUT .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StorageMetric.RESPONSE_TIME.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StorageMetric.RESPONSE_TIME .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StorageMetric.READ_RESPONSE_TIME.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StorageMetric .READ_RESPONSE_TIME.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StorageMetric.WRITE_RESPONSE_TIME.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StorageMetric .WRITE_RESPONSE_TIME.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StorageMetric.IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StorageMetric.IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StorageMetric.READ_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StorageMetric .READ_THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StorageMetric.WRITE_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StorageMetric .WRITE_THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StorageMetric.READ_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StorageMetric.READ_IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StorageMetric.WRITE_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StorageMetric.WRITE_IOPS .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, }, 'additionalProperties': False }, ResourceType.STORAGE_POOL: { 'type': 'object', 'properties': { StoragePoolMetric.THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StoragePoolMetric .THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StoragePoolMetric.RESPONSE_TIME.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StoragePoolMetric .RESPONSE_TIME.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StoragePoolMetric.IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StoragePoolMetric.IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StoragePoolMetric.READ_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StoragePoolMetric .READ_THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StoragePoolMetric.WRITE_THROUGHPUT.name: { 'type': 'object', 'properties': { 
'unit': {'type': 'string', 'enum': [StoragePoolMetric .WRITE_THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StoragePoolMetric.READ_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StoragePoolMetric.READ_IOPS .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, StoragePoolMetric.WRITE_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [StoragePoolMetric.WRITE_IOPS .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, }, 'additionalProperties': False }, ResourceType.VOLUME: { 'type': 'object', 'properties': { VolumeMetric.THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric.THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.RESPONSE_TIME.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric.RESPONSE_TIME .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.READ_RESPONSE_TIME.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric .READ_RESPONSE_TIME.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.WRITE_RESPONSE_TIME.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric .WRITE_RESPONSE_TIME.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric.IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.READ_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric.READ_THROUGHPUT .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.WRITE_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric .WRITE_THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.READ_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric.READ_IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.WRITE_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric.WRITE_IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.CACHE_HIT_RATIO.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric.CACHE_HIT_RATIO .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.READ_CACHE_HIT_RATIO.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric .READ_CACHE_HIT_RATIO.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.WRITE_CACHE_HIT_RATIO.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric .WRITE_CACHE_HIT_RATIO.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.IO_SIZE.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric.IO_SIZE.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.READ_IO_SIZE.name: { 'type': 
'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric.READ_IO_SIZE .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, VolumeMetric.WRITE_IO_SIZE.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [VolumeMetric.WRITE_IO_SIZE .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, }, 'additionalProperties': False }, ResourceType.CONTROLLER: { 'type': 'object', 'properties': { ControllerMetric.THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [ControllerMetric.THROUGHPUT .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, ControllerMetric.RESPONSE_TIME.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [ControllerMetric .RESPONSE_TIME.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, ControllerMetric.IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [ControllerMetric.IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, ControllerMetric.READ_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [ControllerMetric .READ_THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, ControllerMetric.WRITE_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [ControllerMetric .WRITE_THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, ControllerMetric.READ_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [ControllerMetric.READ_IOPS .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, ControllerMetric.WRITE_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [ControllerMetric.WRITE_IOPS .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, ControllerMetric.CPU_USAGE.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [ControllerMetric.CPU_USAGE .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, } }, 'additionalProperties': False }, ResourceType.PORT: { 'type': 'object', 'properties': { PortMetric.THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [PortMetric.THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, PortMetric.RESPONSE_TIME.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [PortMetric.RESPONSE_TIME .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, PortMetric.IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [PortMetric.IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, PortMetric.READ_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [PortMetric.READ_THROUGHPUT .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, PortMetric.WRITE_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [PortMetric.WRITE_THROUGHPUT .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, PortMetric.READ_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [PortMetric.READ_IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 
'maxLength': 255} }, }, PortMetric.WRITE_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [PortMetric.WRITE_IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, }, 'additionalProperties': False }, ResourceType.DISK: { 'type': 'object', 'properties': { DiskMetric.THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [DiskMetric.THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, DiskMetric.RESPONSE_TIME.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [DiskMetric.RESPONSE_TIME .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, DiskMetric.IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [DiskMetric.IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, DiskMetric.READ_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [DiskMetric.READ_IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, DiskMetric.WRITE_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [DiskMetric.WRITE_IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, DiskMetric.READ_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [DiskMetric.READ_THROUGHPUT .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, DiskMetric.WRITE_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [DiskMetric.WRITE_THROUGHPUT .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, }, 'additionalProperties': False }, ResourceType.FILESYSTEM: { 'type': 'object', 'properties': { FileSystemMetric.THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [FileSystemMetric.THROUGHPUT .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, FileSystemMetric.IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [FileSystemMetric.IOPS.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, FileSystemMetric.READ_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [FileSystemMetric .READ_THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, FileSystemMetric.WRITE_THROUGHPUT.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [FileSystemMetric .WRITE_THROUGHPUT.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, FileSystemMetric.READ_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [FileSystemMetric.READ_IOPS .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, FileSystemMetric.WRITE_IOPS.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [FileSystemMetric.WRITE_IOPS .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, FileSystemMetric.READ_RESPONSE_TIME.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [FileSystemMetric .READ_RESPONSE_TIME.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, FileSystemMetric.WRITE_RESPONSE_TIME.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [FileSystemMetric 
.WRITE_RESPONSE_TIME.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, FileSystemMetric.IO_SIZE.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [FileSystemMetric.IO_SIZE .unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, FileSystemMetric.READ_IO_SIZE.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [FileSystemMetric .READ_IO_SIZE.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, FileSystemMetric.WRITE_IO_SIZE.name: { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': [FileSystemMetric .WRITE_IO_SIZE.unit] }, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, }, 'additionalProperties': False }, }, 'additionalProperties': False }, }, 'additionalProperties': False, 'required': ['is_historic'] } ================================================ FILE: delfin/api/schemas/storages.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'vendor': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'model': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'storage_name': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'rest': { 'type': 'object', 'properties': { 'host': parameter_types.hostname_or_ip_address, 'port': parameter_types.tcp_udp_port, 'username': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'password': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, 'required': ['host', 'port', 'username', 'password'], 'additionalProperties': False }, 'ssh': { 'type': 'object', 'properties': { 'host': parameter_types.hostname_or_ip_address, 'port': parameter_types.tcp_udp_port, 'username': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'password': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'pub_key': {'type': 'string', 'minLength': 1, 'maxLength': 4096}, 'pub_key_type': parameter_types.host_key_type }, 'required': ['host', 'port', 'username', 'password', 'pub_key'], 'additionalProperties': False }, 'cli': { 'type': 'object', 'properties': { 'host': parameter_types.hostname_or_ip_address, 'port': parameter_types.tcp_udp_port, 'username': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'password': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, 'required': ['host', 'username', 'password'], 'additionalProperties': False }, 'smis': { 'type': 'object', 'properties': { 'host': parameter_types.hostname_or_ip_address, 'port': parameter_types.tcp_udp_port, 'username': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'password': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'namespace': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, 'required': ['host', 'username', 'password'], 'additionalProperties': False }, 'extra_attributes': { 'type': 'object', 'patternProperties': { 
'^[a-zA-Z0-9-_:. ]{1,255}$': { 'type': 'string', 'maxLength': 255 } } } }, 'required': ['vendor', 'model'], 'anyOf': [ {'required': ['rest']}, {'required': ['ssh']}, {'required': ['cli']}, {'required': ['smis']} ], 'additionalProperties': False } ================================================ FILE: delfin/api/v1/__init__.py ================================================ ================================================ FILE: delfin/api/v1/access_info.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from delfin import db from delfin import cryptor from delfin.api import validation from delfin.api.common import wsgi from delfin.api.schemas import access_info as schema_access_info from delfin.api.views import access_info as access_info_viewer from delfin.db.sqlalchemy.models import AccessInfo from delfin.common import constants from delfin.drivers import api as driverapi class AccessInfoController(wsgi.Controller): def __init__(self): super(AccessInfoController, self).__init__() self._view_builder = access_info_viewer.ViewBuilder() self.driver_api = driverapi.API() def show(self, req, id): """Show access information by storage id.""" ctxt = req.environ['delfin.context'] access_info = db.access_info_get(ctxt, id) return self._view_builder.show(access_info) def _cm_access_info_update(self, ctxt, access_info, body): access_info_dict = copy.deepcopy(access_info) unused = ['created_at', 'updated_at', 'storage_name', 'storage_id', 'extra_attributes'] access_info_dict = AccessInfo.to_dict(access_info_dict) for field in unused: if access_info_dict.get(field): access_info_dict.pop(field) for access in constants.ACCESS_TYPE: if access_info_dict.get(access): access_info_dict.pop(access) access_info_list = db.access_info_get_all( ctxt, filters=access_info_dict) for cm_access_info in access_info_list: if cm_access_info['storage_id'] == access_info['storage_id']: continue for access in constants.ACCESS_TYPE: if cm_access_info.get(access): cm_access_info[access]['password'] = cryptor.decode( cm_access_info[access]['password']) if body.get(access): cm_access_info[access].update(body[access]) self.driver_api.update_access_info(ctxt, cm_access_info) @validation.schema(schema_access_info.update) def update(self, req, id, body): """Update storage access information.""" ctxt = req.environ.get('delfin.context') access_info = db.access_info_get(ctxt, id) self._cm_access_info_update(ctxt, access_info, body) for access in constants.ACCESS_TYPE: if access_info.get(access): access_info[access]['password'] = cryptor.decode( access_info[access]['password']) if body.get(access): access_info[access].update(body[access]) access_info = self.driver_api.update_access_info(ctxt, access_info) return self._view_builder.show(access_info) def show_all(self, req): """Show all access information.""" ctxt = req.environ.get('delfin.context') access_infos = db.access_info_get_all(ctxt) return self._view_builder.show_all(access_infos) def 
create_resource(): return wsgi.Resource(AccessInfoController()) ================================================ FILE: delfin/api/v1/alert_source.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from pyasn1.type.univ import OctetString from delfin import db, cryptor from delfin import exception from delfin.alert_manager import rpcapi from delfin.api import validation from delfin.api.common import wsgi from delfin.api.schemas import alert_source as schema_alert from delfin.api.views import alert_source as alert_view from delfin.common import constants LOG = log.getLogger(__name__) SNMPv3_keys = ('username', 'auth_key', 'security_level', 'auth_protocol', 'privacy_protocol', 'privacy_key', 'engine_id') class AlertSourceController(wsgi.Controller): def __init__(self): super().__init__() self.alert_rpcapi = rpcapi.AlertAPI() @wsgi.response(200) @validation.schema(schema_alert.put) def put(self, req, id, body): """Create a new alert source or update an exist one.""" ctx = req.environ['delfin.context'] alert_source = body alert_source["storage_id"] = id db.storage_get(ctx, id) alert_source = self._input_check(alert_source) snmp_config_to_del = self._get_snmp_config_brief(ctx, id) if snmp_config_to_del is not None: alert_source = db.alert_source_update(ctx, id, alert_source) else: alert_source = db.alert_source_create(ctx, alert_source) snmp_config_to_add = alert_source self.alert_rpcapi.sync_snmp_config(ctx, snmp_config_to_del, snmp_config_to_add) return alert_view.build_alert_source(alert_source.to_dict()) @wsgi.response(200) def show(self, req, id): ctx = req.environ['delfin.context'] alert_source = db.alert_source_get(ctx, id) return alert_view.build_alert_source(alert_source.to_dict()) @wsgi.response(200) def delete(self, req, id): ctx = req.environ['delfin.context'] snmp_config_to_del = self._get_snmp_config_brief(ctx, id) if snmp_config_to_del is not None: self.alert_rpcapi.sync_snmp_config(ctx, snmp_config_to_del, None) db.alert_source_delete(ctx, id) else: raise exception.AlertSourceNotFound(id) def _input_check(self, alert_source): version = alert_source.get('version') if version.lower() == 'snmpv3': user_name = alert_source.get('username') security_level = alert_source.get('security_level') engine_id = alert_source.get('engine_id') # Validate engine_id, check octet string can be formed from it if engine_id: try: OctetString.fromHexString(engine_id) except (TypeError, ValueError): msg = "engine_id should be a set of octets in " \ "hexadecimal format." raise exception.InvalidInput(msg) if not user_name or not security_level: msg = "If snmp version is SNMPv3, then username, " \ "security_level are required." 
raise exception.InvalidInput(msg) if security_level == constants.SecurityLevel.AUTHNOPRIV\ or security_level == constants.SecurityLevel.AUTHPRIV: auth_protocol = alert_source.get('auth_protocol') auth_key = alert_source.get('auth_key') if not auth_protocol or not auth_key: msg = "If snmp version is SNMPv3 and security_level is " \ "authPriv or authNoPriv, auth_protocol and " \ "auth_key are required." raise exception.InvalidInput(msg) alert_source['auth_key'] = cryptor.encode( alert_source['auth_key']) if security_level == constants.SecurityLevel.AUTHPRIV: privacy_protocol = alert_source.get('privacy_protocol') privacy_key = alert_source.get('privacy_key') if not privacy_protocol or not privacy_key: msg = "If snmp version is SNMPv3 and security_level" \ " is authPriv, privacy_protocol and " \ "privacy_key are required." raise exception.InvalidInput(msg) alert_source['privacy_key'] = cryptor.encode( alert_source['privacy_key']) else: alert_source['privacy_key'] = None alert_source['privacy_protocol'] = None else: alert_source['auth_key'] = None alert_source['auth_protocol'] = None alert_source['privacy_key'] = None alert_source['privacy_protocol'] = None # Clear keys for other versions. alert_source['community_string'] = None else: community_string = alert_source.get('community_string') if not community_string: msg = "If snmp version is SNMPv1 or SNMPv2c, " \ "community_string is required." raise exception.InvalidInput(msg) alert_source['community_string'] = cryptor.encode( alert_source['community_string']) # Clear keys for SNMPv3 for k in SNMPv3_keys: alert_source[k] = None return alert_source def _get_snmp_config_brief(self, ctx, storage_id): """ Get snmp configuration that will be used to delete from trap receiver. Only community_index(storage_id) required for snmp v1/v2 deletion, user_name and engine_id are required for snmp v3. So here we only get those required parameters. Return None if configuration not found. """ try: alert_source = db.alert_source_get(ctx, storage_id) snmp_config = {"storage_id": alert_source["storage_id"], "version": alert_source["version"]} if snmp_config["version"].lower() == "snmpv3": snmp_config["username"] = alert_source["username"] snmp_config["engine_id"] = alert_source["engine_id"] return snmp_config except exception.AlertSourceNotFound: return None def _decrypt_auth_key(self, alert_source): auth_key = alert_source.get('auth_key', None) privacy_key = alert_source.get('privacy_key', None) if auth_key: alert_source['auth_key'] = cryptor.decode(auth_key) if privacy_key: alert_source['privacy_key'] = cryptor.decode(privacy_key) return alert_source def show_all(self, req): """Show all snmp configs.""" ctx = req.environ['delfin.context'] snmp_configs = db.alert_source_get_all(ctx) return alert_view.show_all_snmp_configs(snmp_configs) def create_resource(): return wsgi.Resource(AlertSourceController()) ================================================ FILE: delfin/api/v1/alerts.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
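# ---------------------------------------------------------------------------
# Editor's example (hedged): a PUT /storages/{id}/snmp-config body that
# satisfies the SNMPv3 rules enforced by _input_check() above for the
# authPriv level: username and security_level are mandatory, authPriv
# additionally demands auth_protocol/auth_key and privacy_protocol/
# privacy_key, and engine_id must be hex (9-64 characters). Every value is
# invented; the authoritative enum spellings live in
# delfin.common.constants.SecurityLevel and in parameter_types.
# ---------------------------------------------------------------------------
snmpv3_authpriv_body = {
    'host': '10.0.0.5',
    'version': 'SNMPv3',
    'username': 'monitor',
    'security_level': 'authPriv',           # assumed spelling
    'auth_protocol': 'HMACMD5',             # assumed value
    'auth_key': 'auth-secret-01',           # minLength 8 per the put schema
    'privacy_protocol': 'DES',              # assumed value
    'privacy_key': 'priv-secret-01',
    'engine_id': '800000d30300000e112245',  # even-length hex string
}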
# See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from delfin import db from delfin import exception from delfin.api import validation from delfin.api.common import wsgi from delfin.api.schemas import alerts as schema_alerts from delfin.api.views import alerts as alerts_view from delfin.common import alert_util from delfin.drivers import api as driver_manager from delfin.task_manager import rpcapi as task_rpcapi LOG = log.getLogger(__name__) class AlertController(wsgi.Controller): def __init__(self): super().__init__() self.task_rpcapi = task_rpcapi.TaskAPI() self.driver_manager = driver_manager.API() @wsgi.response(200) def show(self, req, id): ctx = req.environ['delfin.context'] query_para = {} query_para.update(req.GET) try: begin_time = None end_time = None if query_para.get('begin_time'): begin_time = int(query_para.get('begin_time')) if query_para.get('end_time'): end_time = int(query_para.get('end_time')) except Exception: msg = "begin_time and end_time should be integer values in " \ "milliseconds." raise exception.InvalidInput(msg) # When both begin_time and end_time are provided, end_time should # be greater than begin_time if begin_time and end_time and end_time <= begin_time: msg = "end_time should be greater than begin_time." raise exception.InvalidInput(msg) storage = db.storage_get(ctx, id) alert_list = self.driver_manager.list_alerts(ctx, id, query_para) # Update storage attributes in each alert model for alert in alert_list: alert_util.fill_storage_attributes(alert, storage) return alerts_view.build_alerts(alert_list) @wsgi.response(200) def delete(self, req, id, sequence_number): ctx = req.environ['delfin.context'] _ = db.storage_get(ctx, id) self.driver_manager.clear_alert(ctx, id, sequence_number) @validation.schema(schema_alerts.post) @wsgi.response(200) def sync(self, req, id, body): ctx = req.environ['delfin.context'] # begin_time and end_time are optional parameters begin_time = body.get('begin_time') end_time = body.get('end_time') # When both begin_time and end_time are provided, end_time should # be greater than begin_time if begin_time and end_time and end_time <= begin_time: msg = "end_time should be greater than begin_time." raise exception.InvalidInput(msg) # Check for the storage existence _ = db.storage_get(ctx, id) query_para = {'begin_time': body.get('begin_time'), 'end_time': body.get('end_time')} # Trigger asynchronous alert syncing from storage backend self.task_rpcapi.sync_storage_alerts(ctx, id, query_para) def create_resource(): return wsgi.Resource(AlertController()) ================================================ FILE: delfin/api/v1/controllers.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
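# ---------------------------------------------------------------------------
# Editor's example (hedged): the time-window rule shared by
# AlertController.show() and sync() above. begin_time and end_time are
# millisecond timestamps, each optional, but when both are present end_time
# must be strictly greater than begin_time. The helper below is
# illustrative, not part of delfin.
# ---------------------------------------------------------------------------
def _valid_window(begin_time=None, end_time=None):
    """Mirror the controller's check on a millisecond time range."""
    if begin_time and end_time and end_time <= begin_time:
        return False
    return True


assert _valid_window(1600000000000, 1600000060000)      # one-minute window
assert not _valid_window(1600000060000, 1600000000000)  # reversed: rejected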
from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import controllers as controller_view class ControllerController(wsgi.Controller): def __init__(self): super(ControllerController, self).__init__() self.search_options = ['name', 'status', 'id', 'storage_id', 'native_controller_id'] def _get_controllers_search_options(self): """Return controllers search options allowed .""" return self.search_options def index(self, req): ctxt = req.environ['delfin.context'] query_params = {} query_params.update(req.GET) # update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # strip out options except supported search options api_utils.remove_invalid_options( ctxt, query_params, self._get_controllers_search_options()) controllers = db.controller_get_all(ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) return controller_view.build_controllers(controllers) def show(self, req, id): ctxt = req.environ['delfin.context'] controller = db.controller_get(ctxt, id) return controller_view.build_controller(controller) def create_resource(): return wsgi.Resource(ControllerController()) ================================================ FILE: delfin/api/v1/disks.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import disks as disk_view class DiskController(wsgi.Controller): def __init__(self): super(DiskController, self).__init__() self.search_options = ['name', 'status', 'id', 'storage_id', 'native_disk_id'] def _get_disks_search_options(self): """Return disks search options allowed .""" return self.search_options def index(self, req): ctxt = req.environ['delfin.context'] query_params = {} query_params.update(req.GET) # update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # strip out options except supported search options api_utils.remove_invalid_options(ctxt, query_params, self._get_disks_search_options()) disks = db.disk_get_all(ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) return disk_view.build_disks(disks) def show(self, req, id): ctxt = req.environ['delfin.context'] disk = db.disk_get(ctxt, id) return disk_view.build_disk(disk) def create_resource(): return wsgi.Resource(DiskController()) ================================================ FILE: delfin/api/v1/filesystems.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import filesystems as filesystem_view class FilesystemController(wsgi.Controller): def __init__(self): super(FilesystemController, self).__init__() self.search_options = ['name', 'status', 'id', 'storage_id', 'native_filesystem_id'] def _get_fs_search_options(self): """Return filesystems search options allowed .""" return self.search_options def index(self, req): ctxt = req.environ['delfin.context'] query_params = {} query_params.update(req.GET) # update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # strip out options except supported search options api_utils.remove_invalid_options(ctxt, query_params, self._get_fs_search_options()) filesystems = db.filesystem_get_all(ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) return filesystem_view.build_filesystems(filesystems) def show(self, req, id): ctxt = req.environ['delfin.context'] filesystem = db.filesystem_get(ctxt, id) return filesystem_view.build_filesystem(filesystem) def create_resource(): return wsgi.Resource(FilesystemController()) ================================================ FILE: delfin/api/v1/masking_views.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
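# ---------------------------------------------------------------------------
# Editor's example (hedged): the list controllers above (controllers, disks,
# filesystems, ...) all share one index() pattern -- sort, pagination and
# the per-resource search options are peeled off req.GET before the DB
# call. A client-side sketch follows; the base URL, the port 8190, the
# response envelope key and the ``requests`` dependency are assumptions,
# and the precise query grammar is defined by api_utils.get_sort_params()
# and get_pagination_params().
# ---------------------------------------------------------------------------
import requests

resp = requests.get(
    'http://localhost:8190/v1/filesystems',
    params={
        'name': 'fs_01',   # a supported search option for filesystems
        'limit': 10,       # pagination, consumed by get_pagination_params
        'offset': 0,
    })
resp.raise_for_status()
filesystems = resp.json().get('filesystems', [])  # assumed envelope key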
from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import masking_views class MaskingViewController(wsgi.Controller): def __init__(self): super(MaskingViewController, self).__init__() self.search_options = ['name', 'id', 'storage_id', 'native_storage_host_group_id', 'native_storage_port_group_id', 'native_storage_volume_group_id', 'native_storage_host_id', 'native_volume_id', 'native_masking_view_id'] def _get_masking_view_search_options(self): """Return masking view search options allowed .""" return self.search_options def show(self, req, id): ctxt = req.environ['delfin.context'] query_params = {"storage_id": id} query_params.update(req.GET) # Update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # Strip out options except supported search options api_utils.remove_invalid_options( ctxt, query_params, self._get_masking_view_search_options()) masking_view_lists = db.masking_views_get_all(ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) return masking_views.build_masking_views(masking_view_lists) def create_resource(): return wsgi.Resource(MaskingViewController()) ================================================ FILE: delfin/api/v1/port_groups.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
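# ---------------------------------------------------------------------------
# Editor's example (hedged): unlike the flat collections above, masking
# views are exposed per storage, so MaskingViewController.show() seeds the
# filters with the storage id taken from the URL. An equivalent DB-layer
# call is sketched below; the context handling is simplified and every id
# is invented.
# ---------------------------------------------------------------------------
from delfin import context, db

ctxt = context.RequestContext()
views = db.masking_views_get_all(
    ctxt,
    limit=100,
    filters={'storage_id': 'storage-uuid-0001',       # seeded from the URL
             'native_storage_host_id': 'host-0001'})  # optional filter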
from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import port_groups as port_group_view class PortGroupController(wsgi.Controller): def __init__(self): super(PortGroupController, self).__init__() self.search_options = ['name', 'id', 'storage_id', 'native_port_group_id'] def _get_port_group_search_options(self): """Return port group search options allowed .""" return self.search_options def show(self, req, id): ctxt = req.environ['delfin.context'] query_params = {"storage_id": id} query_params.update(req.GET) # Update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # Strip out options except supported search options api_utils.remove_invalid_options( ctxt, query_params, self._get_port_group_search_options()) port_groups = db.port_groups_get_all( ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) # Get Port Group to Port relation from DB for port_group in port_groups: params = { "native_port_group_id": port_group['native_port_group_id'] } ports = db.port_grp_port_rels_get_all( ctxt, filters=params) native_port_id_list = [] for port in ports: native_port_id_list.append(port['native_port_id']) port_group['ports'] = native_port_id_list return port_group_view.build_port_groups(port_groups) def create_resource(): return wsgi.Resource(PortGroupController()) ================================================ FILE: delfin/api/v1/ports.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import ports as port_view class PortController(wsgi.Controller): def __init__(self): super(PortController, self).__init__() self.search_options = ['name', 'status', 'id', 'storage_id', 'wwn', 'native_controller_id', 'native_port_id'] def _get_ports_search_options(self): """Return ports search options allowed .""" return self.search_options def index(self, req): ctxt = req.environ['delfin.context'] query_params = {} query_params.update(req.GET) # update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # strip out options except supported search options api_utils.remove_invalid_options(ctxt, query_params, self._get_ports_search_options()) ports = db.port_get_all(ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) return port_view.build_ports(ports) def show(self, req, id): ctxt = req.environ['delfin.context'] port = db.port_get(ctxt, id) return port_view.build_port(port) def create_resource(): return wsgi.Resource(PortController()) ================================================ FILE: delfin/api/v1/qtrees.py ================================================ # Copyright 2021 The SODA Authors. 
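# ---------------------------------------------------------------------------
# Editor's example (hedged): PortGroupController.show() above stitches two
# tables together -- each port-group row is enriched with the native port
# ids found in the group-to-port relation table. A sketch of the resulting
# per-group payload; every identifier below is invented.
# ---------------------------------------------------------------------------
port_group = {
    'id': 'pg-uuid-0001',
    'storage_id': 'storage-uuid-0001',
    'native_port_group_id': 'PortGroup_001',
    # Filled in from db.port_grp_port_rels_get_all(ctxt, filters=...):
    'ports': ['port-0001', 'port-0002'],
}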
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import qtrees as qtree_view class QtreeController(wsgi.Controller): def __init__(self): super(QtreeController, self).__init__() self.search_options = ['name', 'state', 'id', 'storage_id', 'native_filesystem_id', 'native_qtree_id'] def _get_qtrees_search_options(self): """Return qtrees search options allowed .""" return self.search_options def index(self, req): ctxt = req.environ['delfin.context'] query_params = {} query_params.update(req.GET) # update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # strip out options except supported search options api_utils.remove_invalid_options(ctxt, query_params, self._get_qtrees_search_options()) qtrees = db.qtree_get_all(ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) return qtree_view.build_qtrees(qtrees) def show(self, req, id): ctxt = req.environ['delfin.context'] qtree = db.qtree_get(ctxt, id) return qtree_view.build_qtree(qtree) def create_resource(): return wsgi.Resource(QtreeController()) ================================================ FILE: delfin/api/v1/quotas.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
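# ---------------------------------------------------------------------------
# Editor's example (hedged): the contract of the api_utils helpers used by
# every index() above, sketched on a qtree query. The real behaviour lives
# in delfin/api/api_utils.py; only what is visible from the call sites is
# assumed -- pagination keys are popped first, then anything outside the
# controller's search_options is stripped before the DB call.
# ---------------------------------------------------------------------------
query_params = {'name': 'qtree_01',   # supported qtree search option
                'limit': '10',        # consumed by get_pagination_params
                'bogus': 'x'}         # unsupported, removed

# After api_utils.get_pagination_params(query_params) and
# api_utils.remove_invalid_options(ctxt, query_params, search_options),
# only the supported filter survives:
#     query_params == {'name': 'qtree_01'}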
from delfin import db
from delfin.api import api_utils
from delfin.api.common import wsgi
from delfin.api.views import quotas as quota_view


class QuotaController(wsgi.Controller):
    def __init__(self):
        super(QuotaController, self).__init__()
        self.search_options = ['name', 'status', 'id', 'storage_id', 'type',
                               'native_quota_id', 'native_filesystem_id',
                               'native_qtree_id', 'user_group_name']

    def _get_quotas_search_options(self):
        """Return the allowed search options for quotas."""
        return self.search_options

    def index(self, req):
        ctxt = req.environ['delfin.context']
        query_params = {}
        query_params.update(req.GET)
        # update options other than filters
        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)
        marker, limit, offset = api_utils.get_pagination_params(query_params)
        # strip out options except supported search options
        api_utils.remove_invalid_options(ctxt, query_params,
                                         self._get_quotas_search_options())

        quotas = db.quota_get_all(ctxt, marker, limit, sort_keys,
                                  sort_dirs, query_params, offset)
        return quota_view.build_quotas(quotas)

    def show(self, req, id):
        ctxt = req.environ['delfin.context']
        quota = db.quota_get(ctxt, id)
        return quota_view.build_quota(quota)


def create_resource():
    return wsgi.Resource(QuotaController())


================================================
FILE: delfin/api/v1/router.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from delfin.api import common from delfin.api import extensions from delfin.api.v1 import access_info from delfin.api.v1 import alert_source from delfin.api.v1 import alerts from delfin.api.v1 import controllers from delfin.api.v1 import disks from delfin.api.v1 import filesystems from delfin.api.v1 import ports from delfin.api.v1 import qtrees from delfin.api.v1 import quotas from delfin.api.v1 import shares from delfin.api.v1 import storage_pools from delfin.api.v1 import storages from delfin.api.v1 import volumes from delfin.api.v1 import storage_hosts from delfin.api.v1 import storage_host_initiators from delfin.api.v1 import storage_host_groups from delfin.api.v1 import port_groups from delfin.api.v1 import volume_groups from delfin.api.v1 import masking_views class APIRouter(common.APIRouter): ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper): mapper.redirect("", "/") self.resources['storages'] = storages.create_resource() mapper.resource("storage", "storages", controller=self.resources['storages'], member={'sync': 'POST'}) mapper.connect("storages", "/storages/sync", controller=self.resources['storages'], action="sync_all", conditions={"method": ["POST"]}) mapper.connect("storages", "/storages/{id}/capabilities", controller=self.resources['storages'], action="get_capabilities", conditions={"method": ["GET"]}) self.resources['access_info'] = access_info.create_resource() mapper.connect("storages", "/storages/{id}/access-info", controller=self.resources['access_info'], action="show", conditions={"method": ["GET"]}) mapper.connect("storages", "/storages/{id}/access-info", controller=self.resources['access_info'], action="update", conditions={"method": ["PUT"]}) mapper.connect("storages", "/access-infos", controller=self.resources['access_info'], action="show_all", conditions={"method": ["GET"]}) self.resources['alert_sources'] = alert_source.create_resource() mapper.connect("storages", "/storages/{id}/snmp-config", controller=self.resources['alert_sources'], action="put", conditions={"method": ["PUT"]}) mapper.connect("storages", "/storages/{id}/snmp-config", controller=self.resources['alert_sources'], action="show", conditions={"method": ["GET"]}) mapper.connect("storages", "/storages/{id}/snmp-config", controller=self.resources['alert_sources'], action="delete", conditions={"method": ["DELETE"]}) mapper.connect("storages", "/snmp-configs", controller=self.resources['alert_sources'], action="show_all", conditions={"method": ["GET"]}) self.resources['alerts'] = alerts.create_resource() mapper.connect("storages", "/storages/{id}/alerts/{sequence_number}", controller=self.resources['alerts'], action="delete", conditions={"method": ["DELETE"]}) mapper.connect("storages", "/storages/{id}/alerts", controller=self.resources['alerts'], action="show", conditions={"method": ["GET"]}) mapper.connect("storages", "/storages/{id}/alerts/sync", controller=self.resources['alerts'], action="sync", conditions={"method": ["POST"]}) self.resources['storage-pools'] = storage_pools.create_resource() mapper.resource("storage-pool", "storage-pools", controller=self.resources['storage-pools']) self.resources['volumes'] = volumes.create_resource() mapper.resource("volume", "volumes", controller=self.resources['volumes']) self.resources['controllers'] = controllers.create_resource() mapper.resource("controller", "controllers", controller=self.resources['controllers']) self.resources['ports'] = ports.create_resource() mapper.resource("port", "ports", 
controller=self.resources['ports']) self.resources['disks'] = disks.create_resource() mapper.resource("disk", "disks", controller=self.resources['disks']) self.resources['filesystems'] = filesystems.create_resource() mapper.resource("filesystems", "filesystems", controller=self.resources['filesystems']) self.resources['qtrees'] = qtrees.create_resource() mapper.resource("qtrees", "qtrees", controller=self.resources['qtrees']) self.resources['quotas'] = quotas.create_resource() mapper.resource("quotas", "quotas", controller=self.resources['quotas']) self.resources['shares'] = shares.create_resource() mapper.resource("shares", "shares", controller=self.resources['shares']) self.resources['storage_host_initiators'] \ = storage_host_initiators.create_resource() mapper.connect("storages", "/storages/{id}/storage-host-initiators", controller=self.resources['storage_host_initiators'], action="show", conditions={"method": ["GET"]}) self.resources['storage_hosts'] = storage_hosts.create_resource() mapper.connect("storages", "/storages/{id}/storage-hosts", controller=self.resources['storage_hosts'], action="show", conditions={"method": ["GET"]}) self.resources['storage_host_groups'] \ = storage_host_groups.create_resource() mapper.connect("storages", "/storages/{id}/storage-host-groups", controller=self.resources['storage_host_groups'], action="show", conditions={"method": ["GET"]}) self.resources['port_groups'] \ = port_groups.create_resource() mapper.connect("storages", "/storages/{id}/port-groups", controller=self.resources['port_groups'], action="show", conditions={"method": ["GET"]}) self.resources['volume_groups'] \ = volume_groups.create_resource() mapper.connect("storages", "/storages/{id}/volume-groups", controller=self.resources['volume_groups'], action="show", conditions={"method": ["GET"]}) self.resources['masking_views'] \ = masking_views.create_resource() mapper.connect("storages", "/storages/{id}/masking-views", controller=self.resources['masking_views'], action="show", conditions={"method": ["GET"]}) ================================================ FILE: delfin/api/v1/shares.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
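The router above maps each nested storage resource to a GET route whose {id} is the owning storage; a sketch of walking them follows (the host, port, and /v1 mount point are assumptions, and the storage id is hypothetical):

import requests

BASE = 'http://localhost:8190/v1'        # assumed delfin endpoint
STORAGE = 'hypothetical-storage-uuid'    # a registered storage id

# Each path below is wired above to action="show" with a GET condition.
for path in ('storage-host-initiators', 'storage-hosts',
             'storage-host-groups', 'port-groups',
             'volume-groups', 'masking-views'):
    resp = requests.get(f'{BASE}/storages/{STORAGE}/{path}')
    resp.raise_for_status()
    # each view wraps its list under a plural key, e.g. 'port_groups'
    print(path, '->', list(resp.json()))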
from delfin import db
from delfin.api import api_utils
from delfin.api.common import wsgi
from delfin.api.views import shares as share_view


class ShareController(wsgi.Controller):
    def __init__(self):
        super(ShareController, self).__init__()
        self.search_options = ['name', 'status', 'id', 'storage_id',
                               'native_share_id']

    def _get_shares_search_options(self):
        """Return the allowed search options for shares."""
        return self.search_options

    def index(self, req):
        ctxt = req.environ['delfin.context']
        query_params = {}
        query_params.update(req.GET)
        # update options other than filters
        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)
        marker, limit, offset = api_utils.get_pagination_params(query_params)
        # strip out options except supported search options
        api_utils.remove_invalid_options(ctxt, query_params,
                                         self._get_shares_search_options())

        shares = db.share_get_all(ctxt, marker, limit, sort_keys,
                                  sort_dirs, query_params, offset)
        return share_view.build_shares(shares)

    def show(self, req, id):
        ctxt = req.environ['delfin.context']
        share = db.share_get(ctxt, id)
        return share_view.build_share(share)


def create_resource():
    return wsgi.Resource(ShareController())


================================================
FILE: delfin/api/v1/storage_host_groups.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import storage_host_groups as storage_host_group_view class StorageHostGroupController(wsgi.Controller): def __init__(self): super(StorageHostGroupController, self).__init__() self.search_options = ['name', 'id', 'storage_id', 'native_storage_host_group_id'] def _get_storage_host_group_search_options(self): """Return storage host group search options allowed .""" return self.search_options def show(self, req, id): ctxt = req.environ['delfin.context'] query_params = {"storage_id": id} query_params.update(req.GET) # Update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # Strip out options except supported search options api_utils.remove_invalid_options( ctxt, query_params, self._get_storage_host_group_search_options()) storage_host_groups = db.storage_host_groups_get_all( ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) # Get Storage Host Group to Host relation from DB for host_group in storage_host_groups: params = { "native_storage_host_group_id": host_group['native_storage_host_group_id'] } hosts = db.storage_host_grp_host_rels_get_all( ctxt, filters=params) native_storage_host_id_list = [] for host in hosts: native_storage_host_id_list.append( host['native_storage_host_id']) host_group['storage_hosts'] = native_storage_host_id_list return storage_host_group_view\ .build_storage_host_groups(storage_host_groups) def create_resource(): return wsgi.Resource(StorageHostGroupController()) ================================================ FILE: delfin/api/v1/storage_host_initiators.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
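The storage-host-group controller below, like the port-group and volume-group controllers in this package, stitches member ids onto each group row with one relation query per group. The same logic in miniature, as a self-contained sketch with plain dicts standing in for DB rows (note it is one DB round-trip per group in the controller; a single filtered query could batch it):

# Self-contained sketch of the group -> member-id stitching pattern.
host_groups = [
    {'native_storage_host_group_id': 'hg-1'},
    {'native_storage_host_group_id': 'hg-2'},
]
relations = [
    {'native_storage_host_group_id': 'hg-1', 'native_storage_host_id': 'h-1'},
    {'native_storage_host_group_id': 'hg-1', 'native_storage_host_id': 'h-2'},
    {'native_storage_host_group_id': 'hg-2', 'native_storage_host_id': 'h-3'},
]

for group in host_groups:
    gid = group['native_storage_host_group_id']
    group['storage_hosts'] = [
        rel['native_storage_host_id'] for rel in relations
        if rel['native_storage_host_group_id'] == gid]

assert host_groups[0]['storage_hosts'] == ['h-1', 'h-2']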
from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import storage_host_initiators as \ storage_host_initiator_view class StorageHostInitiatorController(wsgi.Controller): def __init__(self): super(StorageHostInitiatorController, self).__init__() self.search_options = ['name', 'status', 'wwn', 'id', 'storage_id', 'native_storage_host_id', 'native_storage_host_initiator_id'] def _get_storage_host_initiator_search_options(self): """Return storage host initiator search options allowed .""" return self.search_options def show(self, req, id): ctxt = req.environ['delfin.context'] query_params = {"storage_id": id} query_params.update(req.GET) # Update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # Strip out options except supported search options api_utils.remove_invalid_options( ctxt, query_params, self._get_storage_host_initiator_search_options()) storage_host_initiators = db.storage_host_initiators_get_all( ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) return storage_host_initiator_view.build_storage_host_initiators( storage_host_initiators) def create_resource(): return wsgi.Resource(StorageHostInitiatorController()) ================================================ FILE: delfin/api/v1/storage_hosts.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import storage_hosts as storage_host_view class StorageHostController(wsgi.Controller): def __init__(self): super(StorageHostController, self).__init__() self.search_options = ['name', 'status', 'id', 'storage_id', 'native_storage_host_id'] def _get_storage_host_search_options(self): """Return storage host search options allowed .""" return self.search_options def _fill_storage_host_initiators(self, ctxt, storage_host, storage_id): """Fills initiator list for storage host.""" storage_host_initiators = db.storage_host_initiators_get_all( ctxt, filters={"storage_id": storage_id, "native_storage_host_id": storage_host['native_storage_host_id']}) storage_host_initiator_list = [] for storage_host_initiator in storage_host_initiators: storage_host_initiator_list.append( storage_host_initiator['native_storage_host_initiator_id']) return storage_host_initiator_list def show(self, req, id): ctxt = req.environ['delfin.context'] query_params = {"storage_id": id} query_params.update(req.GET) # Update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # Strip out options except supported search options api_utils.remove_invalid_options( ctxt, query_params, self._get_storage_host_search_options()) storage_hosts = db.storage_hosts_get_all(ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) for storage_host in storage_hosts: storage_host['storage_host_initiators'] \ = self._fill_storage_host_initiators(ctxt, storage_host, id) return storage_host_view.build_storage_hosts(storage_hosts) def create_resource(): return wsgi.Resource(StorageHostController()) ================================================ FILE: delfin/api/v1/storage_pools.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import storage_pools as storage_pool_view class StoragePoolController(wsgi.Controller): def __init__(self): super(StoragePoolController, self).__init__() self.search_options = ['name', 'status', 'id', 'storage_id', 'native_storage_pool_id'] def _get_storage_pools_search_options(self): """Return storage_pools search options allowed .""" return self.search_options def show(self, req, id): ctxt = req.environ['delfin.context'] pool = db.storage_pool_get(ctxt, id) return storage_pool_view.build_storage_pool(pool) def index(self, req): ctxt = req.environ['delfin.context'] query_params = {} query_params.update(req.GET) # update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # strip out options except supported search options api_utils.remove_invalid_options( ctxt, query_params, self._get_storage_pools_search_options()) storage_pools = db.storage_pool_get_all( ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) return storage_pool_view.build_storage_pools(storage_pools) def create_resource(): return wsgi.Resource(StoragePoolController()) ================================================ FILE: delfin/api/v1/storages.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
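The storage-pool controller above runs the same preamble as every list handler in this package: get_sort_params and get_pagination_params pop their options out of query_params, and remove_invalid_options drops anything that is not a supported search option, so only real filters reach the DB call. A reduced, hypothetical re-implementation of those two helpers makes the flow concrete (this is a sketch, not the actual delfin api_utils code):

# Hypothetical, simplified stand-ins for the api_utils helpers.
def get_pagination_params(params):
    # pop() so pagination keys never reach the DB filter dict
    marker = params.pop('marker', None)
    limit = params.pop('limit', None)
    offset = params.pop('offset', None)
    return marker, (int(limit) if limit else None), int(offset or 0)

def remove_invalid_options(params, allowed):
    for key in list(params):
        if key not in allowed:
            del params[key]

query = {'name': 'pool-1', 'limit': '10', 'bogus': 'x'}
marker, limit, offset = get_pagination_params(query)
remove_invalid_options(query, ['name', 'status', 'id',
                               'storage_id', 'native_storage_pool_id'])
assert query == {'name': 'pool-1'} and limit == 10 and offset == 0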
import copy

import six
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils

from delfin import coordination
from delfin import db
from delfin import exception
from delfin.api import api_utils
from delfin.api import validation
from delfin.api.common import wsgi
from delfin.api.schemas import storages as schema_storages
from delfin.api.views import storages as storage_view
from delfin.common import constants
from delfin.drivers import api as driverapi
from delfin.i18n import _
from delfin.task_manager import perf_job_controller
from delfin.task_manager import rpcapi as task_rpcapi
from delfin.task_manager.tasks import resources

LOG = log.getLogger(__name__)
CONF = cfg.CONF


class StorageController(wsgi.Controller):
    def __init__(self):
        super().__init__()
        self.task_rpcapi = task_rpcapi.TaskAPI()
        self.driver_api = driverapi.API()
        self.search_options = ['name', 'vendor', 'model', 'status',
                               'serial_number']

    def _get_storages_search_options(self):
        """Return the allowed search options for storages."""
        return self.search_options

    def index(self, req):
        ctxt = req.environ['delfin.context']
        query_params = {}
        query_params.update(req.GET)
        # update options other than filters
        sort_keys, sort_dirs = api_utils.get_sort_params(query_params)
        marker, limit, offset = api_utils.get_pagination_params(query_params)
        # strip out options except supported search options
        api_utils.remove_invalid_options(
            ctxt, query_params, self._get_storages_search_options())

        storages = db.storage_get_all(ctxt, marker, limit, sort_keys,
                                      sort_dirs, query_params, offset)
        return storage_view.build_storages(storages)

    def show(self, req, id):
        ctxt = req.environ['delfin.context']
        storage = db.storage_get(ctxt, id)
        return storage_view.build_storage(storage)

    @wsgi.response(201)
    @validation.schema(schema_storages.create)
    def create(self, req, body):
        """Register a new storage device."""
        ctxt = req.environ['delfin.context']
        access_info_dict = body

        # Lock to avoid concurrent registration of the same backend.
        # The lock host comes from the first access type present in the body.
        for access in constants.ACCESS_TYPE:
            if access_info_dict.get(access) is not None:
                host = access_info_dict.get(access).get('host')
                break
        lock_name = 'storage-create-' + host
        lock = coordination.Lock(lock_name)

        with lock:
            if self._storage_exist(ctxt, access_info_dict):
                raise exception.StorageAlreadyExists()
            storage = self.driver_api.discover_storage(ctxt,
                                                       access_info_dict)

        # Registration succeeded; sync resource collection for this storage.
        try:
            self.sync(req, storage['id'])

            # Post registration, trigger alert sync
            self.task_rpcapi.sync_storage_alerts(ctxt, storage['id'],
                                                 query_para=None)
        except Exception as e:
            # Unexpected error occurred while syncing resources.
            msg = _('Failed to sync resources for storage: %(storage)s. '
                    'Error: %(err)s') % {'storage': storage['id'], 'err': e}
            LOG.error(msg)

        try:
            # Trigger performance monitoring.
            capabilities = self.driver_api.get_capabilities(
                context=ctxt, storage_id=storage['id'])
            validation.validate_capabilities(capabilities)
            perf_job_controller.create_perf_job(ctxt, storage['id'],
                                                capabilities)
        except exception.EmptyResourceMetrics:
            msg = _("Resource metric provided by capabilities is empty for "
                    "storage: %s") % storage['id']
            LOG.info(msg)
        except Exception as e:
            # Unexpected error occurred while setting up monitoring.
            msg = _('Failed to trigger performance monitoring for storage: '
                    '%(storage)s. Error: %(err)s') % {
                'storage': storage['id'], 'err': six.text_type(e)}
            LOG.error(msg)
        return storage_view.build_storage(storage)

    @wsgi.response(202)
    def delete(self, req, id):
        ctxt = req.environ['delfin.context']
        storage = db.storage_get(ctxt, id)
        for subclass in resources.StorageResourceTask.__subclasses__():
            self.task_rpcapi.remove_storage_resource(
                ctxt, storage['id'],
                subclass.__module__ + '.' + subclass.__name__)
        self.task_rpcapi.remove_storage_in_cache(ctxt, storage['id'])
        perf_job_controller.delete_perf_job(ctxt, storage['id'])

    @wsgi.response(202)
    def sync_all(self, req):
        """Sync all registered storage devices.

        This is an asynchronous call, so it returns 202 on success. It
        launches the storage device info, storage_pool, volume, etc.
        collection tasks for each registered storage device.
        """
        ctxt = req.environ['delfin.context']

        storages = db.storage_get_all(ctxt)
        LOG.debug("Total {0} registered storages found in database".
                  format(len(storages)))
        resource_count = len(resources.StorageResourceTask.__subclasses__())

        for storage in storages:
            try:
                _set_synced_if_ok(ctxt, storage['id'], resource_count)
            except exception.InvalidInput as e:
                LOG.warning('Cannot start a new sync task for %s, '
                            'reason is %s' % (storage['id'], e.msg))
                continue
            else:
                for subclass in \
                        resources.StorageResourceTask.__subclasses__():
                    self.task_rpcapi.sync_storage_resource(
                        ctxt, storage['id'],
                        subclass.__module__ + '.' + subclass.__name__)

    @wsgi.response(202)
    def sync(self, req, id):
        """Sync resources of one storage device (asynchronous, 202 on
        success).
        """
        ctxt = req.environ['delfin.context']
        storage = db.storage_get(ctxt, id)
        resource_count = len(resources.StorageResourceTask.__subclasses__())
        _set_synced_if_ok(ctxt, storage['id'], resource_count)

        for subclass in resources.StorageResourceTask.__subclasses__():
            self.task_rpcapi.sync_storage_resource(
                ctxt, storage['id'],
                subclass.__module__ + '.' + subclass.__name__)

    def _storage_exist(self, context, access_info):
        access_info_dict = copy.deepcopy(access_info)

        access_info_list = access_info_filter(context, access_info_dict)
        for _access_info in access_info_list:
            try:
                storage = db.storage_get(context,
                                         _access_info['storage_id'])
                if storage:
                    LOG.error("Storage %s has same access "
                              "information." % storage['id'])
                    return True
            except exception.StorageNotFound:
                # The storage may not have been saved successfully after
                # its access information was stored during registration.
                # Remove the access info when the storage does not exist,
                # so no residual data is left in the database.
                LOG.debug("Remove residual access information.")
                db.access_info_delete(context, _access_info['storage_id'])

        return False

    @wsgi.response(200)
    def get_capabilities(self, req, id):
        """Fetch capabilities from the driver associated with the storage
        device.
        """
        # Check and fetch storage with storage_id
        ctx = req.environ['delfin.context']
        storage_info = db.storage_get(ctx, id)

        # Fetch the supported driver's capabilities
        capabilities = self.driver_api.get_capabilities(
            ctx, storage_info['id'])

        # validate capabilities
        validation.validate_capabilities(capabilities)
        return storage_view.build_capabilities(storage_info, capabilities)


def create_resource():
    return wsgi.Resource(StorageController())


@coordination.synchronized('{storage_id}')
def _set_synced_if_ok(context, storage_id, resource_count):
    try:
        storage = db.storage_get(context, storage_id)
    except exception.StorageNotFound:
        msg = 'Storage %s not found when trying to set sync_status' \
              % storage_id
        raise exception.InvalidInput(message=msg)
    else:
        last_update = storage['updated_at'] or storage['created_at']
        current_time = timeutils.utcnow()
        interval = (current_time - last_update).seconds
        # If the last synchronization happened within
        # CONF.sync_task_expiration (in seconds) and the sync status is
        # greater than 0, a sync task is still running and a new one
        # must not be launched.
        if interval < CONF.sync_task_expiration and \
                storage['sync_status'] > 0:
            raise exception.StorageIsSyncing(storage['id'])
        storage['sync_status'] = resource_count * constants.ResourceSync.START
        storage['updated_at'] = current_time
        db.storage_update(context, storage['id'], storage)


def access_info_filter(context, access_info):
    access_info_dict = copy.deepcopy(access_info)
    for access in constants.ACCESS_TYPE:
        if access_info_dict.get(access):
            access_info_dict.pop(access)

    # Check if storage is registered
    access_info_list = db.access_info_get_all(context,
                                              filters=access_info_dict)

    filtered_list = []
    for access_info_db in access_info_list:
        match = True
        for access in constants.ACCESS_TYPE:
            access_filter = access_info.get(access)
            access_db = access_info_db.get(access)
            if match and access_filter:
                if not access_db or \
                        access_filter['host'] != access_db['host'] or \
                        access_filter['port'] != access_db['port']:
                    match = False
                    break
        if match:
            filtered_list.append(access_info_db)
    return filtered_list


================================================
FILE: delfin/api/v1/volume_groups.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
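Registration in the storage controller above is synchronous discovery followed by fire-and-forget resource and alert syncs. A hedged sketch of registering a backend (the endpoint, port, and credentials are assumptions; the body shape mirrors the 'rest' access type that create() inspects, while the authoritative schema lives in delfin/api/schemas/storages.py):

import requests

BASE = 'http://localhost:8190/v1'   # assumed delfin endpoint

# Hypothetical backend credentials. 'rest' is one of constants.ACCESS_TYPE;
# its 'host' is what create() uses to build the coordination lock name.
access_info = {
    'vendor': 'fake_storage',
    'model': 'fake_driver',
    'rest': {
        'host': '192.0.2.10',
        'port': 8443,
        'username': 'admin',
        'password': 'secret',
    },
}

resp = requests.post(f'{BASE}/storages', json=access_info)
print(resp.status_code)         # 201; resource collection continues async
storage_id = resp.json()['id']  # usable with the /storages/{id}/... routes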
from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import volume_groups as volume_group_view class VolumeGroupController(wsgi.Controller): def __init__(self): super(VolumeGroupController, self).__init__() self.search_options = ['name', 'id', 'storage_id', 'native_volume_group_id'] def _get_volume_group_search_options(self): """Return volume group search options allowed .""" return self.search_options def show(self, req, id): ctxt = req.environ['delfin.context'] query_params = {"storage_id": id} query_params.update(req.GET) # Update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # Strip out options except supported search options api_utils.remove_invalid_options( ctxt, query_params, self._get_volume_group_search_options()) volume_groups = db.volume_groups_get_all( ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) # Get Volume Group to Volume relation from DB for volume_group in volume_groups: params = { "native_volume_group_id": volume_group['native_volume_group_id'] } volumes = db.vol_grp_vol_rels_get_all( ctxt, filters=params) native_volume_id_list = [] for volume in volumes: native_volume_id_list.append(volume['native_volume_id']) volume_group['volumes'] = native_volume_id_list return volume_group_view.build_volume_groups(volume_groups) def create_resource(): return wsgi.Resource(VolumeGroupController()) ================================================ FILE: delfin/api/v1/volumes.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from delfin import db from delfin.api import api_utils from delfin.api.common import wsgi from delfin.api.views import volumes as volume_view class VolumeController(wsgi.Controller): def __init__(self): super(VolumeController, self).__init__() self.search_options = ['name', 'status', 'id', 'storage_id', 'wwn', 'native_volume_id', 'native_storage_pool_id'] def _get_volumes_search_options(self): """Return volumes search options allowed .""" return self.search_options def index(self, req): ctxt = req.environ['delfin.context'] query_params = {} query_params.update(req.GET) # update options other than filters sort_keys, sort_dirs = api_utils.get_sort_params(query_params) marker, limit, offset = api_utils.get_pagination_params(query_params) # strip out options except supported search options api_utils.remove_invalid_options(ctxt, query_params, self._get_volumes_search_options()) volumes = db.volume_get_all(ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset) return volume_view.build_volumes(volumes) def show(self, req, id): ctxt = req.environ['delfin.context'] volume = db.volume_get(ctxt, id) return volume_view.build_volume(volume) def create_resource(): return wsgi.Resource(VolumeController()) ================================================ FILE: delfin/api/validation/__init__.py ================================================ # Copyright 2020 The SODA Authors. # Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Request Body validating middleware. """ import functools from delfin.api.validation import validators from delfin.api.schemas.storage_capabilities_schema import \ STORAGE_CAPABILITIES_SCHEMA from delfin import exception def schema(request_body_schema): """Register a schema to validate request body. Registered schema will be used for validating request body just before API method executing. :param dict request_body_schema: a schema to validate request body. """ def add_validator(func): @functools.wraps(func) def wrapper(*args, **kwargs): schema_validator = validators._SchemaValidator(request_body_schema) schema_validator.validate(kwargs['body']) return func(*args, **kwargs) return wrapper return add_validator def validate_capabilities(capabilities): if not capabilities: raise exception.StorageCapabilityNotSupported() schema_validator = validators._SchemaValidator(STORAGE_CAPABILITIES_SCHEMA) try: schema_validator.validate(capabilities) except exception.InvalidInput as ex: raise exception.InvalidStorageCapability(ex.msg) ================================================ FILE: delfin/api/validation/parameter_types.py ================================================ # Copyright 2020 The SODA Authors. # Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Common parameter types for validating request Body.

"""

import re
import unicodedata

import six

from delfin.i18n import _


class ValidationRegex(object):
    def __init__(self, regex, reason):
        self.regex = regex
        self.reason = reason


def _is_printable(char):
    """Determine if a unicode code point is printable.

    This checks if the character is either "other" (mostly control
    codes), or a non-horizontal space. All characters that don't match
    those criteria are considered printable; that is: letters;
    combining marks; numbers; punctuation; symbols; (horizontal) space
    separators.
    """
    category = unicodedata.category(char)
    return (not category.startswith("C") and
            (not category.startswith("Z") or category == "Zs"))


def _get_all_chars():
    for i in range(0xFFFF):
        yield six.unichr(i)


# build a regex that matches all printable characters. This allows
# spaces in the middle of the name. Also note that the regexp below
# deliberately allows the empty string. This is so only the constraint
# which enforces a minimum length for the name is triggered when an
# empty string is tested. Otherwise it is not deterministic which
# constraint fails and this causes issues for some unittests when
# PYTHONHASHSEED is set randomly.


def _build_regex_range(ws=True, invert=False, exclude=None):
    """Build a range regex for a set of characters in utf8.

    This builds a valid range regex for characters in utf8 by
    iterating the entire space and building up a set of x-y ranges for
    all the characters we find which are valid.

    :param ws: should we include whitespace in this range.
    :param exclude: any characters we want to exclude
    :param invert: invert the logic

    The inversion is useful when we want to generate a set of ranges
    which is everything that's not a certain class. For instance,
    produce all the non printable characters as a set of ranges.
    """
    if exclude is None:
        exclude = []
    regex = ""
    # are we currently in a range
    in_range = False
    # last character we found, for closing ranges
    last = None
    # last character we added to the regex, this lets us know that we
    # already have B in the range, which means we don't need to close
    # it out with B-B. While the latter seems to work, it's kind of
    # bad form.
    last_added = None

    def valid_char(char):
        if char in exclude:
            result = False
        elif ws:
            result = _is_printable(char)
        else:
            # Zs is the unicode class for space characters, of which
            # there are about 10 in this range.
            result = (_is_printable(char) and
                      unicodedata.category(char) != "Zs")
        if invert is True:
            return not result
        return result

    # iterate through the entire character range; in_range tracks
    # whether we are currently inside an x-y range.
    for c in _get_all_chars():
        if valid_char(c):
            if not in_range:
                regex += re.escape(c)
                last_added = c
            in_range = True
        else:
            if in_range and last != last_added:
                regex += "-" + re.escape(last)
            in_range = False
        last = c
    else:
        if in_range:
            regex += "-" + re.escape(c)

    return regex


valid_name_regex_base = '^(?![%s])[%s]*(?<![%s])$'
255: msg = _("The 'name' can not be greater than 255 characters.") raise exception.InvalidName(msg) return True @jsonschema.FormatChecker.cls_checks('uuid') def _validate_uuid_format(instance): return uuidutils.is_uuid_like(instance) class FormatChecker(jsonschema.FormatChecker): """A FormatChecker can output the message from cause exception We need understandable validation errors messages for users. When a custom checker has an exception, the FormatChecker will output a readable message provided by the checker. """ def check(self, param_value, format): """Check whether the param_value conforms to the given format. :argument param_value: the param_value to check :type: any primitive type (str, number, bool) :argument str format: the format that param_value should conform to :raises: :exc:`FormatError` if param_value does not conform to format """ if format not in self.checkers: return # For safety reasons custom checkers can be registered with # allowed exception types. Anything else will fall into the # default formatter. func, raises = self.checkers[format] result, cause = None, None try: result = func(param_value) except raises as e: cause = e if not result: msg = "%r is not a %r" % (param_value, format) raise jsonschema_exc.FormatError(msg, cause=cause) class _SchemaValidator(object): """A validator class This class is changed from Draft4Validator to validate minimum/maximum value of a string number(e.g. '10'). This changes can be removed when we tighten up the API definition and the XML conversion. Also FormatCheckers are added for checking data formats which would be passed through cinder api commonly. """ validator = None validator_org = jsonschema.Draft4Validator def __init__(self, schema, relax_additional_properties=False): validators = { 'minimum': self._validate_minimum, 'maximum': self._validate_maximum, } if relax_additional_properties: validators[ 'additionalProperties'] = _soft_validate_additional_properties validator_cls = jsonschema.validators.extend(self.validator_org, validators) format_checker = FormatChecker() self.validator = validator_cls(schema, format_checker=format_checker) def validate(self, *args, **kwargs): try: self.validator.validate(*args, **kwargs) except jsonschema.ValidationError as ex: if isinstance(ex.cause, exception.InvalidName): raise ex.cause elif len(ex.path) > 0: detail = _("Invalid input for field/attribute %(path)s." " %(message)s") % {'path': ex.path.pop(), 'message': ex.message} else: detail = ex.message raise exception.InvalidInput(detail) except TypeError as ex: # NOTE: If passing non string value to patternProperties parameter, # TypeError happens. Here is for catching the TypeError. 
detail = six.text_type(ex) raise exception.InvalidInput(detail) def _number_from_str(self, param_value): try: value = int(param_value) except (ValueError, TypeError): try: value = float(param_value) except (ValueError, TypeError): return None return value def _validate_minimum(self, validator, minimum, param_value, schema): param_value = self._number_from_str(param_value) if param_value is None: return return self.validator_org.VALIDATORS['minimum'](validator, minimum, param_value, schema) def _validate_maximum(self, validator, maximum, param_value, schema): param_value = self._number_from_str(param_value) if param_value is None: return return self.validator_org.VALIDATORS['maximum'](validator, maximum, param_value, schema) ================================================ FILE: delfin/api/views/__init__.py ================================================ ================================================ FILE: delfin/api/views/access_info.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin.common import constants class ViewBuilder(object): def show(self, access_info): access_info_dict = access_info.to_dict() for access in constants.ACCESS_TYPE: if access_info.get(access): access_info[access].pop('password', None) return access_info_dict def show_all(self, access_infos): infos = [] for access_info in access_infos: access_info_dict = self.show(access_info) infos.append(access_info_dict) return dict(access_infos=infos) ================================================ FILE: delfin/api/views/alert_source.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
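_SchemaValidator above swaps Draft4Validator's minimum/maximum keywords for versions that first coerce numeric strings such as '10'. The same technique in isolation, against the public jsonschema API (a standalone sketch, not delfin code):

import jsonschema

def _minimum_with_coercion(validator, minimum, instance, schema):
    # Coerce numeric strings like '10' before the stock draft-4 check.
    try:
        instance = float(instance)
    except (TypeError, ValueError):
        return  # leave non-numeric values to the other keywords
    yield from jsonschema.Draft4Validator.VALIDATORS['minimum'](
        validator, minimum, instance, schema)

Extended = jsonschema.validators.extend(
    jsonschema.Draft4Validator, {'minimum': _minimum_with_coercion})

schema = {'type': 'string', 'minimum': 1}
Extended(schema).validate('5')        # passes: '5' coerces to 5.0
try:
    Extended(schema).validate('0')    # 0.0 < 1, so this raises
except jsonschema.ValidationError as err:
    print(err.message)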
import copy from delfin import cryptor def build_alert_source(value): view = copy.deepcopy(value) view.pop("auth_key") view.pop("privacy_key") version = view['version'] if version.lower() == 'snmpv2c': view['community_string'] = cryptor.decode(view['community_string']) # Remove the key not belong to snmpv2c view.pop('username') view.pop('security_level') view.pop('auth_protocol') view.pop('privacy_protocol') view.pop('engine_id') view.pop('context_name') elif version.lower() == 'snmpv3': # Remove the key not belong to snmpv3 view.pop('community_string') return dict(view) def show_all_snmp_configs(values): snmp_configs = [] for snmp_config in values: snmp_config_dict = build_alert_source(dict(snmp_config)) snmp_configs.append(snmp_config_dict) return dict(snmp_configs=snmp_configs) ================================================ FILE: delfin/api/views/alerts.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_alerts(alerts): # Build list of alerts views = [build_alert(alert) for alert in alerts] return dict(alerts=views) def build_alert(alert): view = copy.deepcopy(alert) return dict(view) ================================================ FILE: delfin/api/views/controllers.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_controllers(controllers): # Build list of controllers views = [build_controller(controller) for controller in controllers] return dict(controllers=views) def build_controller(controller): view = copy.deepcopy(controller) return dict(view) ================================================ FILE: delfin/api/views/disks.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
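Nearly every module in delfin/api/views is the same two-function pattern: deep-copy each DB row and wrap the list under a plural key. The generic shape, as a sketch:

import copy

# Generic stand-in for the per-resource view builders in this package.
def build_things(things):
    return dict(things=[build_thing(thing) for thing in things])

def build_thing(thing):
    # deepcopy so callers cannot mutate rows held by the DB layer
    return dict(copy.deepcopy(thing))

rows = [{'id': 'd-1', 'name': 'disk-1'}, {'id': 'd-2', 'name': 'disk-2'}]
assert build_things(rows) == {'things': rows}          # equal by value
assert build_things(rows)['things'][0] is not rows[0]  # but not by identity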
import copy def build_disks(disks): # Build list of disks views = [build_disk(disk) for disk in disks] return dict(disks=views) def build_disk(disk): view = copy.deepcopy(disk) return dict(view) ================================================ FILE: delfin/api/views/filesystems.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_filesystems(filesystems): # Build list of filesystems views = [build_filesystem(filesystem) for filesystem in filesystems] return dict(filesystems=views) def build_filesystem(filesystem): view = copy.deepcopy(filesystem) return dict(view) ================================================ FILE: delfin/api/views/masking_views.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_masking_views(masking_views): # Build list of masking views views = [build_masking_view(masking_view) for masking_view in masking_views] return dict(masking_views=views) def build_masking_view(masking_view): view = copy.deepcopy(masking_view) return dict(view) ================================================ FILE: delfin/api/views/port_groups.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_port_groups(port_groups): # Build list of port groups views = [build_port_group(port_group) for port_group in port_groups] return dict(port_groups=views) def build_port_group(port_group): view = copy.deepcopy(port_group) return dict(view) ================================================ FILE: delfin/api/views/ports.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_ports(ports): # Build list of ports views = [build_port(port) for port in ports] return dict(ports=views) def build_port(port): view = copy.deepcopy(port) return dict(view) ================================================ FILE: delfin/api/views/qtrees.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_qtrees(qtrees): # Build list of qtrees views = [build_qtree(qtree) for qtree in qtrees] return dict(qtrees=views) def build_qtree(qtree): view = copy.deepcopy(qtree) return dict(view) ================================================ FILE: delfin/api/views/quotas.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_quotas(quotas): # Build list of quotas views = [build_quota(quota) for quota in quotas] return dict(quotas=views) def build_quota(quota): view = copy.deepcopy(quota) return dict(view) ================================================ FILE: delfin/api/views/shares.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_shares(shares): # Build list of shares views = [build_share(share) for share in shares] return dict(shares=views) def build_share(share): view = copy.deepcopy(share) return dict(view) ================================================ FILE: delfin/api/views/storage_host_groups.py ================================================ # Copyright 2021 The SODA Authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_storage_host_groups(storage_host_groups): # Build list of storage host groups views = [build_storage_host_group(storage_host_group) for storage_host_group in storage_host_groups] return dict(storage_host_groups=views) def build_storage_host_group(storage_host_group): view = copy.deepcopy(storage_host_group) return dict(view) ================================================ FILE: delfin/api/views/storage_host_initiators.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_storage_host_initiators(storage_host_initiators): # Build list of storage host initiators views = [build_storage_host_initiator(storage_host_initiator) for storage_host_initiator in storage_host_initiators] return dict(storage_host_initiators=views) def build_storage_host_initiator(storage_host_initiator): view = copy.deepcopy(storage_host_initiator) return dict(view) ================================================ FILE: delfin/api/views/storage_hosts.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_storage_hosts(storage_hosts): # Build list of storage hosts views = [build_storage_host(storage_host) for storage_host in storage_hosts] return dict(storage_hosts=views) def build_storage_host(storage_host): view = copy.deepcopy(storage_host) return dict(view) ================================================ FILE: delfin/api/views/storage_pools.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_storage_pools(storage_pools): # Build list of storage_pools views = [build_storage_pool(storage_pool) for storage_pool in storage_pools] return dict(storage_pools=views) def build_storage_pool(storage_pool): view = copy.deepcopy(storage_pool) return dict(view) ================================================ FILE: delfin/api/views/storages.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from delfin.common import constants def build_storages(storages): # Build list of storages views = [build_storage(storage) for storage in storages] return dict(storages=views) def build_storage(storage): view = copy.deepcopy(storage) if view['sync_status'] == constants.SyncStatus.SYNCED: view['sync_status'] = 'SYNCED' else: view['sync_status'] = 'SYNCING' return dict(view) def build_capabilities(storage_info, capabilities): """build capability API response""" # build metadata metadata = dict() metadata['vendor'] = storage_info['vendor'] metadata['model'] = storage_info['model'] # create final view view = dict() view['metadata'] = metadata view['spec'] = capabilities return view ================================================ FILE: delfin/api/views/volume_groups.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_volume_groups(volume_groups): # Build list of volume groups views = [build_volume_group(volume_group) for volume_group in volume_groups] return dict(volume_groups=views) def build_volume_group(volume_group): view = copy.deepcopy(volume_group) return dict(view) ================================================ FILE: delfin/api/views/volumes.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy def build_volumes(volumes): # Build list of volumes views = [build_volume(volume) for volume in volumes] return dict(volumes=views) def build_volume(volume): view = copy.deepcopy(volume) return dict(view) ================================================ FILE: delfin/cmd/__init__.py ================================================ ================================================ FILE: delfin/cmd/alert.py ================================================ #!/usr/bin/env python # Copyright 2020 The SODA Authors. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Starter script for delfin alert service.""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log from delfin.common import config # noqa from delfin import service from delfin import utils from delfin import version CONF = cfg.CONF def main(): log.register_options(CONF) CONF(sys.argv[1:], project='delfin', version=version.version_string()) log.setup(CONF, "delfin") utils.monkey_patch() # Launch alert manager service alert_manager = service.AlertService.create(binary='delfin-alert', coordination=True) service.serve(alert_manager) service.wait() if __name__ == '__main__': main() ================================================ FILE: delfin/cmd/api.py ================================================ #!/usr/bin/env python # Copyright 2020 The SODA Authors. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Starter script for delfin OS API.""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log from delfin.common import config # noqa from delfin import service from delfin import utils from delfin import version CONF = cfg.CONF def main(): log.register_options(CONF) CONF(sys.argv[1:], project='delfin', version=version.version_string()) log.setup(CONF, "delfin") utils.monkey_patch() launcher = service.process_launcher() api_server = service.WSGIService('delfin', coordination=True) launcher.launch_service(api_server, workers=api_server.workers or 1) launcher.wait() if __name__ == '__main__': main() ================================================ FILE: delfin/cmd/task.py ================================================ #!/usr/bin/env python # Copyright 2020 The SODA Authors. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Starter script for delfin task service.""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log from delfin.common import config # noqa from delfin import service from delfin import utils from delfin import version CONF = cfg.CONF def main(): log.register_options(CONF) CONF(sys.argv[1:], project='delfin', version=version.version_string()) log.setup(CONF, "delfin") utils.monkey_patch() task_server = service.TaskService.create(binary='delfin-task', coordination=True) leader_election = service.LeaderElectionService.create() metrics_task_server = service. \ TaskService.create(binary='delfin-task', topic=CONF.host, manager='delfin.' 'task_manager.' 'metrics_manager.' 'MetricsTaskManager', coordination=True) service.serve(task_server) service.serve(leader_election) service.serve(metrics_task_server) service.wait() if __name__ == '__main__': main() ================================================ FILE: delfin/common/__init__.py ================================================ ================================================ FILE: delfin/common/alert_util.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010-2011 OpenStack Foundation # Copyright 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging LOG = logging.getLogger(__name__) def fill_storage_attributes(alert_model, storage): """ Fills storage attributes for alert model """ alert_model['storage_id'] = storage['id'] alert_model['storage_name'] = storage['name'] alert_model['vendor'] = storage['vendor'] alert_model['model'] = storage['model'] alert_model['serial_number'] = storage['serial_number'] def is_alert_in_time_range(query_para, occur_time): # query_para contains optional begin_time and end_time # This function checks for their existence and validates if occur_time # falls in begin_time and end_time range if not query_para: return True begin_time = None end_time = None try: if query_para.get('begin_time'): begin_time = int(query_para.get('begin_time')) if query_para.get('end_time'): end_time = int(query_para.get('end_time')) except Exception: LOG.warning("Invalid query parameters received, ignoring them") return True if begin_time is not None and end_time is not None: if begin_time <= occur_time <= end_time: return True elif begin_time is not None and begin_time <= occur_time: return True elif end_time is not None and end_time >= occur_time: return True return False ================================================ FILE: delfin/common/config.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command-line flag library. Emulates gflags by wrapping cfg.ConfigOpts. The idea is to move fully to cfg eventually, and this wrapper is a stepping stone. """ import socket from oslo_config import cfg from oslo_log import log from oslo_middleware import cors from oslo_utils import netutils from delfin.common import constants LOG = log.getLogger(__name__) CONF = cfg.CONF log.register_options(CONF) core_opts = [ cfg.StrOpt('state_path', default='/var/lib/delfin', help="Top-level directory for maintaining delfin's state."), ] CONF.register_cli_opts(core_opts) global_opts = [ cfg.HostAddressOpt('my_ip', default=netutils.get_my_ipv4(), sample_default='', help='IP address of this host.'), cfg.HostnameOpt('host', default=socket.gethostname(), sample_default='', help='Name of this node. This can be an opaque ' 'identifier. 
It is not necessarily a hostname, ' 'FQDN, or IP address.'), cfg.ListOpt('delfin_api_ext_list', default=[], help='Specify list of extensions to load when using ' 'delfin_api_extension option with ' 'delfin.api.contrib.select_extensions.'), cfg.ListOpt('delfin_api_extension', default=['delfin.api.contrib.standard_extensions'], help='The delfin api extensions to load.'), cfg.BoolOpt('monkey_patch', default=False, help='Whether to log monkey patching.'), cfg.ListOpt('monkey_patch_modules', default=[], help='List of modules or decorators to monkey patch.'), cfg.IntOpt('service_down_time', default=60, help='Maximum time since last check-in for up service.'), cfg.StrOpt('task_manager', default='delfin.task_manager.manager.TaskManager', help='Full class name for the task manager.'), cfg.StrOpt('delfin_task_topic', default='delfin-task', help='The topic task manager nodes listen on.'), cfg.StrOpt('delfin_alert_topic', default='delfin-alert', help='The topic alert manager nodes listen on.'), cfg.StrOpt('alert_manager', default='delfin.alert_manager.trap_receiver.TrapReceiver', help='Full class name for the trap receiver.'), cfg.StrOpt('delfin_cryptor', default='delfin.cryptor._Base64', help='cryptor type'), cfg.IntOpt('sync_task_expiration', default=1800, help='Sync task expiration in seconds.'), cfg.BoolOpt('snmp_validation_enabled', default=True, help='Whether alert source configuration to be validated ' 'through snmp connectivity.'), ] CONF.register_opts(global_opts) storage_driver_opts = [ cfg.StrOpt('ca_path', default='', help='"": Disable SSL certificate verification, ' '/path/to/file: Use SSL certificate from file location') ] CONF.register_opts(storage_driver_opts, group='storage_driver') telemetry_opts = [ cfg.IntOpt('performance_collection_interval', default=constants.TelemetryCollection .DEF_PERFORMANCE_COLLECTION_INTERVAL, help='default interval (in sec) for performance collection'), cfg.IntOpt('performance_history_on_reschedule', default=constants.TelemetryCollection .DEF_PERFORMANCE_HISTORY_ON_RESCHEDULE, help='default history(in sec) to be collected on a job ' 'reschedule'), cfg.IntOpt('performance_timestamp_overlap', default=constants.TelemetryCollection .DEF_PERFORMANCE_TIMESTAMP_OVERLAP, help='default overlap to be added on start_time in sec ' ), cfg.IntOpt('max_failed_task_retry_window', default=constants.TelemetryCollection .MAX_FAILED_TASK_RETRY_WINDOW, help='Maximum time window (in sec) until which delfin supports ' 'collection for failed tasks'), cfg.BoolOpt('enable_dynamic_subprocess', default=False, help='Enable dynamic subprocess metrics collection'), cfg.IntOpt('process_cleanup_interval', default=60, help='Background process cleanup call interval in sec'), cfg.IntOpt('task_cleanup_delay', default=10, help='Delay for task cleanup before killing child in sec'), cfg.IntOpt('group_change_detect_interval', default=30, help='Local executor group change detect interval in sec'), cfg.IntOpt('max_storages_in_child', default=5, help='Max storages handled by one local executor process'), cfg.IntOpt('max_childs_in_node', default=100000, help='Max processes that can be spawned before forcing fail'), cfg.IntOpt('node_weight', default=100, help='Weight for the node in the Hash Ring'), ] CONF.register_opts(telemetry_opts, "telemetry") def set_middleware_defaults(): """Update default configuration options for oslo.middleware.""" cors.set_defaults( allow_headers=['X-Auth-Token', 'X-OpenStack-Request-ID', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id'], 
expose_headers=['X-Auth-Token', 'X-OpenStack-Request-ID', 'X-Subject-Token', 'X-Service-Token'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) ================================================ FILE: delfin/common/constants.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import namedtuple from pysnmp.entity import config # The maximum value a signed INT type may have DB_MAX_INT = 0x7FFFFFFF # Valid access type supported currently. ACCESS_TYPE = ['rest', 'ssh', 'cli', 'smis'] # Custom fields for Delfin objects class StorageStatus(object): NORMAL = 'normal' OFFLINE = 'offline' ABNORMAL = 'abnormal' DEGRADED = 'degraded' UNKNOWN = 'unknown' ALL = (NORMAL, OFFLINE, ABNORMAL, DEGRADED, UNKNOWN) class StoragePoolStatus(object): NORMAL = 'normal' OFFLINE = 'offline' ABNORMAL = 'abnormal' DEGRADED = 'degraded' UNKNOWN = 'unknown' ALL = (NORMAL, OFFLINE, ABNORMAL, DEGRADED, UNKNOWN) class VolumeStatus(object): AVAILABLE = 'available' ERROR = 'error' ALL = (AVAILABLE, ERROR) class StorageType(object): BLOCK = 'block' FILE = 'file' UNIFIED = 'unified' ALL = (BLOCK, FILE, UNIFIED) class SyncStatus(object): SYNCED = 0 class VolumeType(object): THICK = 'thick' THIN = 'thin' ALL = (THICK, THIN) class PortConnectionStatus(object): CONNECTED = 'connected' DISCONNECTED = 'disconnected' UNKNOWN = 'unknown' ALL = (CONNECTED, DISCONNECTED, UNKNOWN) class PortHealthStatus(object): NORMAL = 'normal' ABNORMAL = 'abnormal' UNKNOWN = 'unknown' ALL = (NORMAL, ABNORMAL, UNKNOWN) class PortType(object): FC = 'fc' ISCSI = 'iscsi' FICON = 'ficon' FCOE = 'fcoe' ETH = 'eth' SAS = 'sas' IB = 'ib' LOGIC = 'logic' CIFS = 'cifs' NFS = 'nfs' FCACHE = 'fcache' COMBO = 'combo' CNA = 'cna' RCIP = 'rcip' NFS_CIFS = 'nfs-cifs' OTHER = 'other' ALL = (FC, ISCSI, FICON, FCOE, ETH, SAS, IB, LOGIC, CIFS, NFS, FCACHE, COMBO, CNA, RCIP, NFS_CIFS, OTHER) class PortLogicalType(object): FRONTEND = 'frontend' BACKEND = 'backend' SERVICE = 'service' MANAGEMENT = 'management' INTERNAL = 'internal' MAINTENANCE = 'maintenance' INTERCONNECT = 'interconnect' CLUSTER = 'cluster' DATA = 'data' NODE_MGMT = 'node-mgmt' INTERCLUSTER = 'intercluster' CLUSTER_MGMT = 'cluster-mgmt' PHYSICAL = 'physical' IF_GROUP = 'if-group' VLAN = 'vlan' OTHER = 'other' ALL = (FRONTEND, BACKEND, SERVICE, MANAGEMENT, INTERNAL, MAINTENANCE, INTERCONNECT, CLUSTER, DATA, NODE_MGMT, INTERCLUSTER, CLUSTER_MGMT, PHYSICAL, IF_GROUP, VLAN, OTHER) class DiskStatus(object): NORMAL = 'normal' ABNORMAL = 'abnormal' DEGRADED = 'degraded' OFFLINE = 'offline' ALL = (NORMAL, ABNORMAL, DEGRADED, OFFLINE) class DiskPhysicalType(object): SATA = 'sata' SAS = 'sas' SSD = 'ssd' NL_SSD = 'nl-ssd' FC = 'fc' LUN = 'lun' ATA = 'ata' FLASH = 'flash' VMDISK = 'vmdisk' NL_SAS = 'nl-sas' SSD_CARD = 'ssd-card' SAS_FLASH_VP = 'sas-flash-vp' HDD = 'hdd' NVME_SSD = 'nvme-ssd' UNKNOWN = 'unknown' ALL = ( SATA, SAS, SSD, NL_SSD, FC, LUN, ATA, FLASH, VMDISK, 
NL_SAS, SSD_CARD, SAS_FLASH_VP, HDD, NVME_SSD, UNKNOWN) class DiskLogicalType(object): FREE = 'free' MEMBER = 'member' HOTSPARE = 'hotspare' CACHE = 'cache' AGGREGATE = 'aggregate' BROKEN = 'broken' FOREIGN = 'foreign' LABELMAINT = 'labelmaint' MAINTENANCE = 'maintenance' SHARED = 'shared' SPARE = 'spare' UNASSIGNED = 'unassigned' UNSUPPORTED = 'unsupported' REMOTE = 'remote' MEDIATOR = 'mediator' DATA = 'data' UNKNOWN = 'unknown' ALL = (FREE, MEMBER, HOTSPARE, CACHE, AGGREGATE, BROKEN, FOREIGN, LABELMAINT, MAINTENANCE, SHARED, SPARE, UNASSIGNED, UNSUPPORTED, REMOTE, MEDIATOR, DATA, UNKNOWN) class FilesystemStatus(object): NORMAL = 'normal' FAULTY = 'faulty' ALL = (NORMAL, FAULTY) class WORMType(object): NON_WORM = 'non_worm' AUDIT_LOG = 'audit_log' COMPLIANCE = 'compliance' ENTERPRISE = 'enterprise' ALL = (NON_WORM, AUDIT_LOG, COMPLIANCE, ENTERPRISE) class NASSecurityMode(object): MIXED = 'mixed' NATIVE = 'native' NTFS = 'ntfs' UNIX = 'unix' ALL = (MIXED, NATIVE, NTFS, UNIX) class QuotaType(object): TREE = 'tree' USER = 'user' GROUP = 'group' ALL = (TREE, USER, GROUP) class FSType(object): THICK = 'thick' THIN = 'thin' ALL = (THICK, THIN) class ShareProtocol(object): CIFS = 'cifs' NFS = 'nfs' FTP = 'ftp' HDFS = 'hdfs' ALL = (CIFS, NFS, FTP, HDFS) # Enumerations for alert severity class Severity(object): FATAL = 'Fatal' CRITICAL = 'Critical' MAJOR = 'Major' MINOR = 'Minor' WARNING = 'Warning' INFORMATIONAL = 'Informational' NOT_SPECIFIED = 'NotSpecified' # Enumerations for alert category class Category(object): FAULT = 'Fault' EVENT = 'Event' RECOVERY = 'Recovery' NOT_SPECIFIED = 'NotSpecified' # Enumerations for clear type class ClearType(object): AUTOMATIC = 'Automatic' MANUAL = 'Manual' class ControllerStatus(object): NORMAL = 'normal' OFFLINE = 'offline' FAULT = 'fault' DEGRADED = 'degraded' UNKNOWN = 'unknown' ALL = (NORMAL, OFFLINE, FAULT, DEGRADED, UNKNOWN) class InitiatorType(object): FC = 'fc' ISCSI = 'iscsi' NVME_OVER_ROCE = 'roce' SAS = 'sas' NVME_OVER_FABRIC = 'nvme-of' UNKNOWN = 'unknown' ALL = (FC, ISCSI, NVME_OVER_ROCE, SAS, NVME_OVER_FABRIC, UNKNOWN) # Enumerations for alert type based on X.733 Specification class EventType(object): COMMUNICATIONS_ALARM = 'CommunicationsAlarm' EQUIPMENT_ALARM = 'EquipmentAlarm' PROCESSING_ERROR_ALARM = 'ProcessingErrorAlarm' QUALITY_OF_SERVICE_ALARM = 'QualityOfServiceAlarm' ENVIRONMENTAL_ALARM = 'EnvironmentalAlarm' INTEGRITY_VIOLATION = 'IntegrityViolation' OPERATIONAL_VIOLATION = 'OperationalViolation' PHYSICAL_VIOLATION = 'PhysicalViolation' SECURITY_MECHANISM_VIOLATION = 'SecurityServiceOrMechanismViolation' TIME_DOMAIN_VIOLATION = 'TimeDomainViolation' NOT_SPECIFIED = 'NotSpecified' # Default resource type for alert DEFAULT_RESOURCE_TYPE = 'Storage' # Default port for connecting to alert source DEFAULT_SNMP_CONNECT_PORT = 161 # Default retry count for connecting to alert source DEFAULT_SNMP_RETRY_NUM = 1 # Default expiration time(in sec) for a alert source connect request DEFAULT_SNMP_EXPIRATION_TIME = 2 # OID used for snmp query, Below oid refers to sysDescr SNMP_QUERY_OID = '1.3.6.1.2.1.1.1.0' # Alert id for internal alerts SNMP_CONNECTION_FAILED_ALERT_ID = '19660818' # Maps to convert config values to pysnmp values AUTH_PROTOCOL_MAP = {"hmacsha": config.usmHMACSHAAuthProtocol, "hmacmd5": config.usmHMACMD5AuthProtocol, "hmcsha2224": config.usmHMAC128SHA224AuthProtocol, "hmcsha2256": config.usmHMAC192SHA256AuthProtocol, "hmcsha2384": config.usmHMAC256SHA384AuthProtocol, "hmcsha2512": config.usmHMAC384SHA512AuthProtocol, "none": 
"None"} PRIVACY_PROTOCOL_MAP = {"aes": config.usmAesCfb128Protocol, "des": config.usmDESPrivProtocol, "aes192": config.usmAesCfb192Protocol, "aes256": config.usmAesCfb256Protocol, "3des": config.usm3DESEDEPrivProtocol, "none": "None"} # Enumerations for clear type class SecurityLevel(object): AUTHPRIV = 'authPriv' AUTHNOPRIV = 'authNoPriv' NOAUTHNOPRIV = 'noAuthnoPriv' # Performance collection constants and common models # Metric model metric_struct = namedtuple("Metric", "name labels values") class ResourceType(object): STORAGE = 'storage' STORAGE_POOL = 'storagePool' VOLUME = 'volume' CONTROLLER = 'controller' PORT = 'port' DISK = 'disk' FILESYSTEM = 'filesystem' SHARE = 'share' ALL = (STORAGE, STORAGE_POOL, VOLUME, CONTROLLER, PORT, DISK, FILESYSTEM, SHARE) # Unified Array metrics model DELFIN_ARRAY_METRICS = [ "responseTime", "throughput", "readThroughput", "writeThroughput", "requests", "readRequests", "writeRequests" ] BLOCK_SIZE = 4096 class ResourceSync(object): START = 100 SUCCEED = 100 FAILED = 101 class TelemetryCollection(object): """Performance monitoring task name""" PERFORMANCE_TASK_METHOD = "delfin.task_manager.scheduler.schedulers." \ "telemetry.performance_collection_handler." \ "PerformanceCollectionHandler" """Failed Performance monitoring job interval""" FAILED_JOB_SCHEDULE_INTERVAL = 900 """Failed Performance monitoring retry count""" MAX_FAILED_JOB_RETRY_COUNT = 5 """Default performance collection interval""" DEF_PERFORMANCE_COLLECTION_INTERVAL = 900 DEF_PERFORMANCE_HISTORY_ON_RESCHEDULE = 1800 DEF_PERFORMANCE_TIMESTAMP_OVERLAP = 60 """Maximum failed task retry window in seconds""" MAX_FAILED_TASK_RETRY_WINDOW = 7200 class TelemetryTaskStatus(object): """Telemetry task enum""" TASK_EXEC_STATUS_SUCCESS = True TASK_EXEC_STATUS_FAILURE = False class TelemetryJobStatus(object): """Telemetry jobs enum""" FAILED_JOB_STATUS_SUCCESS = "Success" FAILED_JOB_STATUS_RETRYING = "Retrying" FAILED_JOB_STATUS_INIT = "Initialized" Metric = namedtuple('Metric', 'name unit description') class MetricUnit: IOPS = 'IOPS' MBS = 'MB/s' MS = 'ms' KB = 'KB' PERCENTAGE = '%' ALL = (IOPS, MBS, MS, KB, PERCENTAGE) class Metrics: IOPS = Metric(name='iops', unit=MetricUnit.IOPS, description='Read/write operations per second') READ_IOPS = Metric(name='readIops', unit=MetricUnit.IOPS, description='Read operations per second') WRITE_IOPS = Metric(name='writeIops', unit=MetricUnit.IOPS, description='Write operations per second') THROUGHPUT = Metric(name='throughput', unit=MetricUnit.MBS, description='Total data transferred per second') READ_THROUGHPUT = Metric(name='readThroughput', unit=MetricUnit.MBS, description='Total read data transferred per ' 'second') WRITE_THROUGHPUT = Metric(name='writeThroughput', unit=MetricUnit.MBS, description='Total write data transferred per ' 'second') RESPONSE_TIME = Metric(name='responseTime', unit=MetricUnit.MS, description='Average time taken for an IO ' 'operation in ms') READ_RESPONSE_TIME = Metric(name='readResponseTime', unit=MetricUnit.MS, description='Read average time taken for an ' 'IO operation in ms') WRITE_RESPONSE_TIME = Metric(name='writeResponseTime', unit=MetricUnit.MS, description='Write average time taken for an ' 'IO operation in ms') IO_SIZE = Metric(name='ioSize', unit=MetricUnit.KB, description='The average size of IO requests in KB') READ_IO_SIZE = Metric(name='readIoSize', unit=MetricUnit.KB, description='The average size of read IO requests ' 'in KB') WRITE_IO_SIZE = Metric(name='writeIoSize', unit=MetricUnit.KB, description='The average 
size of write IO requests ' 'in KB') CACHE_HIT_RATIO = Metric(name='cacheHitRatio', unit=MetricUnit.PERCENTAGE, description='Percentage of ops that are cache ' 'hits') READ_CACHE_HIT_RATIO = Metric(name='readCacheHitRatio', unit=MetricUnit.PERCENTAGE, description='Percentage of read ops that ' 'are cache hits') WRITE_CACHE_HIT_RATIO = Metric(name='writeCacheHitRatio', unit=MetricUnit.PERCENTAGE, description='Percentage of write ops that ' 'are cache hits') CPU_USAGE = Metric(name='cpuUsage', unit=MetricUnit.PERCENTAGE, description='Percentage of cpu usage.') class StorageMetric: """Storage metrics""" IOPS = Metrics.IOPS READ_IOPS = Metrics.READ_IOPS WRITE_IOPS = Metrics.WRITE_IOPS THROUGHPUT = Metrics.THROUGHPUT READ_THROUGHPUT = Metrics.READ_THROUGHPUT WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT RESPONSE_TIME = Metrics.RESPONSE_TIME READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO IO_SIZE = Metrics.IO_SIZE READ_IO_SIZE = Metrics.READ_IO_SIZE WRITE_IO_SIZE = Metrics.WRITE_IO_SIZE class StoragePoolMetric: """Storage pool metrics""" IOPS = Metrics.IOPS READ_IOPS = Metrics.READ_IOPS WRITE_IOPS = Metrics.WRITE_IOPS THROUGHPUT = Metrics.THROUGHPUT READ_THROUGHPUT = Metrics.READ_THROUGHPUT WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT RESPONSE_TIME = Metrics.RESPONSE_TIME READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO IO_SIZE = Metrics.IO_SIZE READ_IO_SIZE = Metrics.READ_IO_SIZE WRITE_IO_SIZE = Metrics.WRITE_IO_SIZE class VolumeMetric: """Volume metrics""" IOPS = Metrics.IOPS READ_IOPS = Metrics.READ_IOPS WRITE_IOPS = Metrics.WRITE_IOPS THROUGHPUT = Metrics.THROUGHPUT READ_THROUGHPUT = Metrics.READ_THROUGHPUT WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT RESPONSE_TIME = Metrics.RESPONSE_TIME READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME IO_SIZE = Metrics.IO_SIZE READ_IO_SIZE = Metrics.READ_IO_SIZE WRITE_IO_SIZE = Metrics.WRITE_IO_SIZE CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO class ControllerMetric: """Controller metrics""" IOPS = Metrics.IOPS READ_IOPS = Metrics.READ_IOPS WRITE_IOPS = Metrics.WRITE_IOPS THROUGHPUT = Metrics.THROUGHPUT READ_THROUGHPUT = Metrics.READ_THROUGHPUT WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT RESPONSE_TIME = Metrics.RESPONSE_TIME READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO IO_SIZE = Metrics.IO_SIZE READ_IO_SIZE = Metrics.READ_IO_SIZE WRITE_IO_SIZE = Metrics.WRITE_IO_SIZE CPU_USAGE = Metrics.CPU_USAGE class PortMetric: """Port metrics""" IOPS = Metrics.IOPS READ_IOPS = Metrics.READ_IOPS WRITE_IOPS = Metrics.WRITE_IOPS THROUGHPUT = Metrics.THROUGHPUT READ_THROUGHPUT = Metrics.READ_THROUGHPUT WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT RESPONSE_TIME = Metrics.RESPONSE_TIME READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO READ_CACHE_HIT_RATIO = 
Metrics.READ_CACHE_HIT_RATIO WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO IO_SIZE = Metrics.IO_SIZE READ_IO_SIZE = Metrics.READ_IO_SIZE WRITE_IO_SIZE = Metrics.WRITE_IO_SIZE class DiskMetric: """Disk metrics""" IOPS = Metrics.IOPS READ_IOPS = Metrics.READ_IOPS WRITE_IOPS = Metrics.WRITE_IOPS THROUGHPUT = Metrics.THROUGHPUT READ_THROUGHPUT = Metrics.READ_THROUGHPUT WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT RESPONSE_TIME = Metrics.RESPONSE_TIME READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO class FileSystemMetric: """File system metrics""" IOPS = Metrics.IOPS READ_IOPS = Metrics.READ_IOPS WRITE_IOPS = Metrics.WRITE_IOPS THROUGHPUT = Metrics.THROUGHPUT READ_THROUGHPUT = Metrics.READ_THROUGHPUT WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME IO_SIZE = Metrics.IO_SIZE READ_IO_SIZE = Metrics.READ_IO_SIZE WRITE_IO_SIZE = Metrics.WRITE_IO_SIZE SNMP_SUPPORTED_MODELS = ('vsp', '3par', 'cmode', 'msa', 'hnas') class HostStatus(object): NORMAL = 'normal' OFFLINE = 'offline' ABNORMAL = 'abnormal' DEGRADED = 'degraded' ALL = (NORMAL, OFFLINE, ABNORMAL, DEGRADED) class HostOSTypes(object): LINUX = 'Linux' WINDOWS = 'Windows' SOLARIS = 'Solaris' HP_UX = 'HP-UX' AIX = 'AIX' XEN_SERVER = 'XenServer' VMWARE_ESX = 'VMware ESX' LINUX_VIS = 'LINUX_VIS' WINDOWS_SERVER_2012 = 'Windows Server 2012' ORACLE_VM = 'Oracle VM' OPEN_VMS = 'Open VMS' MAC_OS = 'Mac OS' UNKNOWN = 'Unknown' ALL = (LINUX, WINDOWS, SOLARIS, HP_UX, AIX, XEN_SERVER, VMWARE_ESX, LINUX_VIS, WINDOWS_SERVER_2012, ORACLE_VM, OPEN_VMS, MAC_OS, UNKNOWN) class InitiatorStatus(object): ONLINE = 'online' OFFLINE = 'offline' UNKNOWN = 'unknown' ALL = (ONLINE, OFFLINE, UNKNOWN) ================================================ FILE: delfin/common/sqlalchemyutils.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010-2011 OpenStack Foundation # Copyright 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of paginate query.""" import datetime from oslo_log import log as logging from six.moves import range import sqlalchemy import sqlalchemy.sql as sa_sql from sqlalchemy.sql import type_api from delfin.db import api from delfin import exception from delfin.i18n import _ LOG = logging.getLogger(__name__) _TYPE_SCHEMA = { 'datetime': datetime.datetime(1900, 1, 1), 'big_integer': 0, 'integer': 0, 'string': '' } def _get_default_column_value(model, column_name): """Return the default value of the columns from DB table. In the PostgreSQL case, if no proper default value is set, a psycopg2.DataError will be thrown.
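Drivers hand performance samples around using the metric_struct namedtuple declared in constants.py above. A sketch of one sample (the label keys and values are illustrative, not a documented schema):

from delfin.common import constants

sample = constants.metric_struct(
    name=constants.Metrics.THROUGHPUT.name,   # 'throughput'
    labels={'storage_id': 'storage-uuid-1',   # illustrative label keys
            'resource_type': constants.ResourceType.STORAGE,
            'unit': constants.MetricUnit.MBS},
    values={1622808000000: 61.2})             # epoch-ms timestamp -> value
print(sample.name, sample.labels['unit'], sample.values)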
""" attr = getattr(model, column_name) # Return the default value directly if the model contains. Otherwise return # a default value which is not None. if attr.default and isinstance(attr.default, type_api.TypeEngine): return attr.default.arg attr_type = attr.type return _TYPE_SCHEMA[attr_type.__visit_name__] # TODO(wangxiyuan): Use oslo_db.sqlalchemy.utils.paginate_query once it is # stable and afforded by the minimum version in requirement.txt. # copied from glance/db/sqlalchemy/api.py def paginate_query(query, model, limit, sort_keys, marker=None, sort_dir=None, sort_dirs=None, offset=None): """Returns a query with sorting / pagination criteria added. Pagination works by requiring a unique sort_key, specified by sort_keys. (If sort_keys is not unique, then we risk looping through values.) We use the last row in the previous page as the 'marker' for pagination. So we must return values that follow the passed marker in the order. With a single-valued sort_key, this would be easy: sort_key > X. With a compound-values sort_key, (k1, k2, k3) we must do this to repeat the lexicographical ordering: (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) We also have to cope with different sort_directions. Typically, the id of the last row is used as the client-facing pagination marker, then the actual marker object must be fetched from the db and passed in to us as marker. :param query: the query object to which we should add paging/sorting :param model: the ORM model class :param limit: maximum number of items to return :param sort_keys: array of attributes by which results should be sorted :param marker: the last item of the previous page; we returns the next results after this value. :param sort_dir: direction in which results should be sorted (asc, desc) :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys :param offset: the number of items to skip from the marker or from the first element. :rtype: sqlalchemy.orm.query.Query :return: The query with sorting/pagination added. 
""" if sort_dir and sort_dirs: raise AssertionError('Both sort_dir and sort_dirs specified.') # Default the sort direction to ascending if sort_dirs is None and sort_dir is None: sort_dir = 'asc' # Ensure a per-column sort direction if sort_dirs is None: sort_dirs = [sort_dir for _sort_key in sort_keys] if len(sort_dirs) != len(sort_keys): raise AssertionError( 'sort_dirs length is not equal to sort_keys length.') # Add sorting for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): sort_dir_func = { 'asc': sqlalchemy.asc, 'desc': sqlalchemy.desc, }[current_sort_dir] try: sort_key_attr = getattr(model, current_sort_key) except AttributeError: raise exception.InvalidInput('Invalid sort key') if not api.is_orm_value(sort_key_attr): raise exception.InvalidInput('Invalid sort key') query = query.order_by(sort_dir_func(sort_key_attr)) # Add pagination if marker is not None: marker_values = [] for sort_key in sort_keys: v = getattr(marker, sort_key) if v is None: v = _get_default_column_value(model, sort_key) marker_values.append(v) # Build up an array of sort criteria as in the docstring criteria_list = [] for i in range(0, len(sort_keys)): crit_attrs = [] for j in range(0, i): model_attr = getattr(model, sort_keys[j]) default = _get_default_column_value(model, sort_keys[j]) attr = sa_sql.expression.case([(model_attr.isnot(None), model_attr), ], else_=default) crit_attrs.append((attr == marker_values[j])) model_attr = getattr(model, sort_keys[i]) default = _get_default_column_value(model, sort_keys[i]) attr = sa_sql.expression.case([(model_attr.isnot(None), model_attr), ], else_=default) if sort_dirs[i] == 'desc': crit_attrs.append((attr < marker_values[i])) elif sort_dirs[i] == 'asc': crit_attrs.append((attr > marker_values[i])) else: raise ValueError(_("Unknown sort direction, " "must be 'desc' or 'asc'")) criteria = sqlalchemy.sql.and_(*crit_attrs) criteria_list.append(criteria) f = sqlalchemy.sql.or_(*criteria_list) query = query.filter(f) if limit is not None: query = query.limit(limit) if offset: query = query.offset(offset) return query ================================================ FILE: delfin/context.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2011 OpenStack LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """RequestContext: context for requests that persist through all of delfin.""" import copy from oslo_context import context from oslo_utils import timeutils import six from delfin.i18n import _ class RequestContext(context.RequestContext): """Security context and request information. Represents the user taking a given action within the system. 
""" def __init__(self, user_id=None, project_id=None, is_admin=None, read_deleted="no", roles=None, remote_address=None, timestamp=None, request_id=None, auth_token=None, overwrite=True, quota_class=None, service_catalog=None, **kwargs): """Initialize RequestContext. :param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. :param kwargs: Extra arguments that might be present, but we ignore because they possibly came in from older rpc messages. """ user = kwargs.pop('user', None) tenant = kwargs.pop('tenant', None) super(RequestContext, self).__init__( auth_token=auth_token, user=user_id or user, domain=kwargs.pop('domain', None), user_domain=kwargs.pop('user_domain', None), project_domain=kwargs.pop('project_domain', None), is_admin=is_admin, read_only=kwargs.pop('read_only', False), show_deleted=kwargs.pop('show_deleted', False), request_id=request_id, resource_uuid=kwargs.pop('resource_uuid', None), overwrite=overwrite, roles=roles) self.user_id = self.user self.tenant = project_id or tenant self.project_id = self.tenant self.storage_id = None self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = timeutils.utcnow() if isinstance(timestamp, six.string_types): timestamp = timeutils.parse_strtime(timestamp) self.timestamp = timestamp if service_catalog: self.service_catalog = [s for s in service_catalog if s.get('type') in ('compute', 'volume')] else: self.service_catalog = [] self.quota_class = quota_class def _get_read_deleted(self): return self._read_deleted def _set_read_deleted(self, read_deleted): if read_deleted not in ('no', 'yes', 'only'): raise ValueError(_("read_deleted can only be one of 'no', " "'yes' or 'only', not %r") % read_deleted) self._read_deleted = read_deleted def _del_read_deleted(self): del self._read_deleted read_deleted = property(_get_read_deleted, _set_read_deleted, _del_read_deleted) def to_dict(self): values = super(RequestContext, self).to_dict() values.update({ 'user_id': getattr(self, 'user_id', None), 'project_id': getattr(self, 'project_id', None), 'storage_id': getattr(self, 'storage_id', None), 'read_deleted': getattr(self, 'read_deleted', None), 'remote_address': getattr(self, 'remote_address', None), 'timestamp': self.timestamp.isoformat() if hasattr( self, 'timestamp') else None, 'quota_class': getattr(self, 'quota_class', None), 'service_catalog': getattr(self, 'service_catalog', None)}) return values @classmethod def from_dict(cls, values): return cls(**values) def elevated(self, read_deleted=None, overwrite=False): """Return a version of this context with admin flag set.""" ctx = copy.deepcopy(self) ctx.is_admin = True if 'admin' not in ctx.roles: ctx.roles.append('admin') if read_deleted is not None: ctx.read_deleted = read_deleted return ctx def get_admin_context(read_deleted="no"): return RequestContext(user_id=None, project_id=None, is_admin=True, read_deleted=read_deleted, overwrite=False) ================================================ FILE: delfin/coordination.py ================================================ # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tooz Coordination and locking utilities.""" import inspect import decorator from oslo_config import cfg from oslo_log import log from oslo_utils import uuidutils import six from tooz import coordination from tooz import locking from tooz import partitioner from delfin import cryptor from delfin import exception from delfin.i18n import _ LOG = log.getLogger(__name__) coordination_opts = [ cfg.StrOpt('backend_type', default='redis', help='The backend type for distributed coordination.' 'Backend could be redis, mysql, zookeeper and so on.' 'For more supported backend, please check Tooz'), cfg.StrOpt('backend_user', default='', help='The backend user for distributed coordination.'), cfg.StrOpt('backend_password', help='The backend password to use ' 'for distributed coordination.'), cfg.StrOpt('backend_server', default='127.0.0.1:6379', help='The backend server for distributed coordination.'), cfg.IntOpt('expiration', default=100, help='The expiration(in second) of the lock.'), cfg.IntOpt('lease_timeout', default=15, help='The expiration(in second) of the lock.'), ] CONF = cfg.CONF CONF.register_opts(coordination_opts, group='coordination') class Coordinator(object): """Tooz coordination wrapper. Coordination member id is created from concatenated `prefix` and `agent_id` parameters. :param str agent_id: Agent identifier :param str prefix: Used to provide member identifier with a meaningful prefix. """ def __init__(self, agent_id=None, prefix=''): self.coordinator = None self.agent_id = agent_id or uuidutils.generate_uuid() self.started = False self.prefix = prefix def start(self): """Connect to coordination back end.""" if self.started: return # NOTE(gouthamr): Tooz expects member_id as a byte string. member_id = (self.prefix + self.agent_id).encode('ascii') LOG.info('Started Coordinator (Agent ID: %(agent)s, prefix: ' '%(prefix)s)', {'agent': self.agent_id, 'prefix': self.prefix}) backend_url = _get_redis_backend_url() self.coordinator = coordination.get_coordinator( backend_url, member_id, timeout=CONF.coordination.expiration) self.coordinator.start(start_heart=True) self.started = True def stop(self): """Disconnect from coordination back end.""" msg = 'Stopped Coordinator (Agent ID: %(agent)s, prefix: %(prefix)s)' msg_args = {'agent': self.agent_id, 'prefix': self.prefix} if self.started: self.coordinator.stop() self.coordinator = None self.started = False LOG.info(msg, msg_args) def get_lock(self, name): """Return a Tooz back end lock. :param str name: The lock name that is used to identify it across all nodes. """ # NOTE(gouthamr): Tooz expects lock name as a byte string lock_name = (self.prefix + name).encode('ascii') if self.started: return self.coordinator.get_lock(lock_name) else: raise exception.LockCreationFailed(_('Coordinator uninitialized.')) LOCK_COORDINATOR = Coordinator(prefix='delfin-') class LeaderElectionCoordinator(Coordinator): def __init__(self, agent_id=None): super(LeaderElectionCoordinator, self). 
\ __init__(agent_id=agent_id, prefix="leader_election") self.group = None def start(self): """Connect to coordination back end.""" if self.started: return # NOTE(gouthamr): Tooz expects member_id as a byte string. member_id = (self.prefix + "-" + self.agent_id).encode('ascii') LOG.info('Started Coordinator (Agent ID: %(agent)s, ' 'prefix: %(prefix)s)', {'agent': self.agent_id, 'prefix': self.prefix}) backend_url = _get_redis_backend_url() self.coordinator = coordination.get_coordinator( backend_url, member_id, timeout=CONF.coordination.lease_timeout) self.coordinator.start() self.started = True def ensure_group(self, group): req = self.coordinator.get_groups() groups = req.get() try: # Check if group exist groups.index(group) except Exception: # Create a group if not exist LOG.debug("Exception is expected as requested group not available " "in tooz backend. Creating the group") request = self.coordinator.create_group(group) request.get() else: LOG.info("Leader group already exist") self.group = group def join_group(self): if self.group: request = self.coordinator.join_group(self.group) request.get() def register_on_start_leading_callback(self, callback): return self.coordinator.watch_elected_as_leader(self.group, callback) def send_heartbeat(self): return self.coordinator.heartbeat() def start_leader_watch(self): return self.coordinator.run_watchers() def stop(self): """Disconnect from coordination back end.""" if self.started: self.coordinator.stop() self.coordinator = None self.started = False LOG.info('Stopped Coordinator (Agent ID: %(agent)s', {'agent': self.agent_id}) def is_still_leader(self): for acquired_lock in self.coordinator._acquired_locks: return acquired_lock.is_still_owner() return False class Lock(locking.Lock): """Lock with dynamic name. :param str lock_name: Lock name. :param dict lock_data: Data for lock name formatting. :param coordinator: Coordinator object to use when creating lock. Defaults to the global coordinator. Using it like so:: with Lock('mylock'): ... ensures that only one process at a time will execute code in context. Lock name can be formatted using Python format string syntax:: Lock('foo-{share.id}, {'share': ...,}') Available field names are keys of lock_data. """ def __init__(self, lock_name, lock_data=None, coordinator=None): super(Lock, self).__init__(six.text_type(id(self))) lock_data = lock_data or {} self.coordinator = coordinator or LOCK_COORDINATOR self.blocking = True self.lock = self._prepare_lock(lock_name, lock_data) def _prepare_lock(self, lock_name, lock_data): if not isinstance(lock_name, six.string_types): raise ValueError(_('Not a valid string: %s') % lock_name) return self.coordinator.get_lock(lock_name.format(**lock_data)) def acquire(self, blocking=None): """Attempts to acquire lock. :param blocking: If True, blocks until the lock is acquired. If False, returns right away. Otherwise, the value is used as a timeout value and the call returns maximum after this number of seconds. :return: returns true if acquired (false if not) :rtype: bool """ blocking = self.blocking if blocking is None else blocking return self.lock.acquire(blocking=blocking) def release(self): """Attempts to release lock. The behavior of releasing a lock which was not acquired in the first place is undefined. """ self.lock.release() def synchronized(lock_name, blocking=True, coordinator=None): """Synchronization decorator. :param str lock_name: Lock name. :param blocking: If True, blocks until the lock is acquired. If False, raises exception when not acquired. 
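Following the Lock docstring above: start the module-level coordinator once per process, then guard the critical section with a dynamically named lock. A sketch (the lock name and lock_data are illustrative):

from delfin import coordination

coordination.LOCK_COORDINATOR.start()  # connect to the tooz backend once

with coordination.Lock('storage-{storage[id]}',
                       {'storage': {'id': 'abc123'}}):
    pass  # only one member cluster-wide runs this block at a time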
Otherwise, the value is used as a timeout value and if lock is not acquired after this number of seconds exception is raised. :param coordinator: Coordinator object to use when creating lock. Defaults to the global coordinator. :raises tooz.coordination.LockAcquireFailed: if lock is not acquired Decorating a method like so:: @synchronized('mylock') def foo(self, *args): ... ensures that only one process will execute the foo method at a time. Different methods can share the same lock:: @synchronized('mylock') def foo(self, *args): ... @synchronized('mylock') def bar(self, *args): ... This way only one of either foo or bar can be executing at a time. Lock name can be formatted using Python format string syntax:: @synchronized('{f_name}-{shr.id}-{snap[name]}') def foo(self, shr, snap): ... Available field names are: decorated function parameters and `f_name` as a decorated function name. """ @decorator.decorator def _synchronized(f, *a, **k): call_args = inspect.getcallargs(f, *a, **k) call_args['f_name'] = f.__name__ lock = Lock(lock_name, call_args, coordinator) with lock(blocking): LOG.info('Lock "%(name)s" acquired by "%(function)s".', {'name': lock_name, 'function': f.__name__}) return f(*a, **k) return _synchronized def _get_redis_backend_url(): cipher_password = getattr(CONF.coordination, 'backend_password', None) if cipher_password is not None: # If password is needed, the password should be # set in config file with cipher text # And in this scenario, these are also needed for backend: # {backend_type}://[{user}]:{password}@{ip}:{port}. plaintext_password = cryptor.decode(cipher_password) # User could be null backend_url = '{backend_type}://{user}:{password}@{server}' \ .format(backend_type=CONF.coordination.backend_type, user=CONF.coordination.backend_user, password=plaintext_password, server=CONF.coordination.backend_server) else: backend_url = '{backend_type}://{server}' \ .format(backend_type=CONF.coordination.backend_type, server=CONF.coordination.backend_server) return backend_url class ConsistentHashing(Coordinator): GROUP_NAME = 'partitioner_group' PARTITIONS = 2**5 def __init__(self): super(ConsistentHashing, self). \ __init__(agent_id=CONF.host, prefix="") def join_group(self): try: weight = CONF.telemetry.node_weight self.coordinator.join_partitioned_group(self.GROUP_NAME, weight=weight, partitions=self.PARTITIONS) except coordination.MemberAlreadyExist: LOG.info('Member %s already in partitioner_group' % CONF.host) def get_task_executor(self, task_id): part = partitioner.Partitioner(self.coordinator, self.GROUP_NAME) members = part.members_for_object(task_id) for member in members: LOG.info('For task id %s, host should be %s' % (task_id, member)) return member.decode('utf-8') def register_watcher_func(self, on_node_join, on_node_leave): self.coordinator.watch_join_group(self.GROUP_NAME, on_node_join) self.coordinator.watch_leave_group(self.GROUP_NAME, on_node_leave) def watch_group_change(self): self.coordinator.run_watchers() class GroupMembership(Coordinator): def __init__(self, agent_id): super(GroupMembership, self). 
\ __init__(agent_id=agent_id, prefix="") def create_group(self, group): try: self.coordinator.create_group(group.encode()).get() except coordination.GroupAlreadyExist: LOG.info("Group {0} already exist".format(group)) def delete_group(self, group): try: self.coordinator.delete_group(group.encode()).get() except coordination.GroupNotCreated: LOG.info("Group {0} not created".format(group)) except coordination.GroupNotEmpty: LOG.info("Group {0} not empty".format(group)) except coordination.ToozError: LOG.info("Group {0} internal error while delete".format(group)) def join_group(self, group): try: self.coordinator.join_group(group.encode()).get() except coordination.MemberAlreadyExist: LOG.info('Member %s already in group' % group) def leave_group(self, group): try: self.coordinator.leave_group(group.encode()).get() except coordination.GroupNotCreated: LOG.info('Group %s not created' % group) def get_members(self, group): try: return self.coordinator.get_members(group.encode()).get() except coordination.GroupNotCreated: LOG.info('Group %s not created' % group) return None def register_watcher_func(self, group, on_process_join, on_process_leave): self.coordinator.watch_join_group(group.encode(), on_process_join) self.coordinator.watch_leave_group(group.encode(), on_process_leave) def watch_group_change(self): self.coordinator.run_watchers() ================================================ FILE: delfin/cryptor.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 from abc import ABCMeta, abstractmethod from oslo_config import cfg from oslo_utils import importutils CONF = cfg.CONF class ICryptor(metaclass=ABCMeta): @staticmethod @abstractmethod def encode(plain_text): pass @staticmethod @abstractmethod def decode(cipher_text): pass class _Base64(ICryptor): @staticmethod def encode(data): """Base64 encode :param data: The plain text that need to be encode :type str: :return cipher data: The encoded cipher text :type str: """ return base64.b64encode(data.encode()).decode('utf-8') @staticmethod def decode(data): """Base64 decode :param data: The cipher text that need to be decode :type str: :return plain data: The decoded plain text :type str: """ return base64.b64decode(data).decode('utf-8') _cryptor = importutils.import_class(CONF.delfin_cryptor) def encode(plain_text): return _cryptor.encode(plain_text) def decode(cipher_text): return _cryptor.decode(cipher_text) ================================================ FILE: delfin/db/__init__.py ================================================ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
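ConsistentHashing above wraps tooz's partitioner: each task node joins the partitioned group with its configured weight, and any node can ask which member owns a given task id. A sketch of that flow (the task id is illustrative):

from delfin import coordination

ring = coordination.ConsistentHashing()
ring.start()       # inherited Coordinator.start(): connect to the backend
ring.join_group()  # join 'partitioner_group' with CONF.telemetry.node_weight

executor = ring.get_task_executor('task-42')  # illustrative task id
print('task-42 should run on', executor)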
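The cryptor module is what _get_redis_backend_url above relies on: coordination.backend_password is stored as cipher text and decoded only when the backend URL is built. With the default _Base64 cryptor:

from delfin import cryptor

cipher = cryptor.encode('s3cret')  # CONF.delfin_cryptor defaults to _Base64
assert cryptor.decode(cipher) == 's3cret'
# _get_redis_backend_url() decodes backend_password the same way to build
# e.g. redis://user:plaintext@127.0.0.1:6379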
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ DB abstraction for Delfin """ from delfin.db.api import * # noqa ================================================ FILE: delfin/db/api.py ================================================ # Copyright 2020 The SODA Authors. # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. Functions in this module are imported into the delfin.db namespace. Call these functions from delfin.db namespace, not the delfin.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. **Related Flags** :backend: string to lookup in the list of LazyPluggable backends. `sqlalchemy` is the only supported backend right now. :connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/delfin/delfin.sqlite`. :enable_new_services: when adding a new service to the database, is it in the storage_pool of available hardware (Default: True) """ from oslo_config import cfg from oslo_db import api as db_api db_opts = [ cfg.StrOpt('db_backend', default='sqlalchemy', help='The backend to use for database.'), ] CONF = cfg.CONF CONF.register_opts(db_opts, "database") _BACKEND_MAPPING = {'sqlalchemy': 'delfin.db.sqlalchemy.api'} IMPL = db_api.DBAPI(CONF.database.db_backend, backend_mapping=_BACKEND_MAPPING, lazy=True) def register_db(): """Create database and tables.""" IMPL.register_db() def storage_get(context, storage_id): """Retrieve a storage device.""" return IMPL.storage_get(context, storage_id) def storage_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all storage devices. If no sort parameters are specified then the returned volumes are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
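As the module docstring says, callers import delfin.db rather than delfin.db.api; the IMPL proxy lazily forwards every call to delfin.db.sqlalchemy.api per _BACKEND_MAPPING. A usage sketch of the getters defined here (the filter key is illustrative):

from delfin import context, db

ctx = context.get_admin_context()
storages = db.storage_get_all(ctx, limit=10,
                              sort_keys=['created_at'],
                              sort_dirs=['desc'],
                              filters={'vendor': 'DellEMC'})  # illustrative
for storage in storages:
    print(storage['id'], storage['name'])  # rows act like dictionaries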
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of storage """ return IMPL.storage_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def storage_create(context, values): """Add a storage device from the values dictionary.""" return IMPL.storage_create(context, values) def storage_update(context, storage_id, values): """Update a storage device with the values dictionary.""" return IMPL.storage_update(context, storage_id, values) def storage_delete(context, storage_id): """Delete a storage device.""" return IMPL.storage_delete(context, storage_id) def volume_create(context, values): """Create a volume from the values dictionary.""" return IMPL.volume_create(context, values) def volumes_create(context, values): """Create multiple volumes.""" return IMPL.volumes_create(context, values) def volume_update(context, volume_id, values): """Update a volume with the values dictionary.""" return IMPL.volume_update(context, volume_id, values) def volumes_update(context, values): """Update multiple volumes.""" return IMPL.volumes_update(context, values) def volumes_delete(context, values): """Delete multiple volumes.""" return IMPL.volumes_delete(context, values) def volume_get(context, volume_id): """Get a volume or raise an exception if it does not exist.""" return IMPL.volume_get(context, volume_id) def volume_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all volumes. If no sort parameters are specified then the returned volumes are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of volumes """ return IMPL.volume_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def volume_delete_by_storage(context, storage_id): """Delete all the volumes of a device.""" return IMPL.volume_delete_by_storage(context, storage_id) def storage_pool_create(context, storage_pool): """Add a storage_pool.""" return IMPL.storage_pool_create(context, storage_pool) def storage_pools_create(context, storage_pools): """Add multiple storage_pools.""" return IMPL.storage_pools_create(context, storage_pools) def storage_pool_update(context, storage_pool_id, storage_pool): """Update a storage_pool.""" return IMPL.storage_pool_update(context, storage_pool_id, storage_pool) def storage_pools_update(context, storage_pools): """Update multiple storage_pools.""" return IMPL.storage_pools_update(context, storage_pools) def storage_pools_delete(context, storage_pools): """Delete storage_pools.""" return IMPL.storage_pools_delete(context, storage_pools) def storage_pool_get(context, storage_pool_id): """Get a storage_pool or raise an exception if it does not exist.""" return IMPL.storage_pool_get(context, storage_pool_id) def storage_pool_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all storage_pools. If no sort parameters are specified then the returned storage_pools are sorted first by the 'created_at' key and then by the 'id' key in descending order.
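Illustrative note (an inference from the sqlalchemy backend's
``apply_like_filters`` further down, not original documentation): a
filter key ending in ``~`` requests an inexact, SQL LIKE match::

    from delfin import db

    # Hypothetical: pools whose name contains the substring 'flash'.
    pools = db.storage_pool_get_all(ctx, filters={'name~': 'flash'})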
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of storage_pools """ return IMPL.storage_pool_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def storage_pool_delete_by_storage(context, storage_id): """Delete all the storage_pools of a device.""" return IMPL.storage_pool_delete_by_storage(context, storage_id) def controllers_create(context, values): """Create multiple controllers.""" return IMPL.controllers_create(context, values) def controllers_update(context, values): """Update multiple controllers.""" return IMPL.controllers_update(context, values) def controllers_delete(context, values): """Delete multiple controllers.""" return IMPL.controllers_delete(context, values) def controller_create(context, values): """Create a controller from the values dictionary.""" return IMPL.controller_create(context, values) def controller_update(context, controller_id, values): """Update a controller with the values dictionary.""" return IMPL.controller_update(context, controller_id, values) def controller_get(context, controller_id): """Get a controller or raise an exception if it does not exist.""" return IMPL.controller_get(context, controller_id) def controller_delete_by_storage(context, storage_id): """Delete all the controllers of a device.""" return IMPL.controller_delete_by_storage(context, storage_id) def controller_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all controllers. If no sort parameters are specified then the returned controllers are sorted first by the 'created_at' key and then by the 'id' key in descending order.
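A hedged illustration of pairing sort_keys with sort_dirs (``ctx`` and
the column names are placeholders)::

    from delfin import db

    # Sort by status ascending, then by name descending.
    ctrls = db.controller_get_all(
        ctx, sort_keys=['status', 'name'], sort_dirs=['asc', 'desc'])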
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of controllers """ return IMPL.controller_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def ports_create(context, values): """Create multiple ports.""" return IMPL.ports_create(context, values) def ports_update(context, values): """Update multiple ports.""" return IMPL.ports_update(context, values) def ports_delete(context, values): """Delete multiple ports.""" return IMPL.ports_delete(context, values) def port_create(context, values): """Create a port from the values dictionary.""" return IMPL.port_create(context, values) def port_update(context, port_id, values): """Update a port with the values dictionary.""" return IMPL.port_update(context, port_id, values) def port_get(context, port_id): """Get a port or raise an exception if it does not exist.""" return IMPL.port_get(context, port_id) def port_delete_by_storage(context, storage_id): """Delete all the ports of a device.""" return IMPL.port_delete_by_storage(context, storage_id) def port_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all ports. If no sort parameters are specified then the returned ports are sorted first by the 'created_at' key and then by the 'id' key in descending order.
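To make the marker semantics concrete, a sketch of page-by-page
iteration (assuming, as in similar OpenStack-style APIs, that the
marker is the id of the last item of the previous page)::

    from delfin import db

    page1 = db.port_get_all(ctx, limit=50)
    # Resume right after the last item of the previous page.
    page2 = db.port_get_all(ctx, limit=50, marker=page1[-1]['id'])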
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of ports """ return IMPL.port_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def disks_create(context, values): """Create multiple disks.""" return IMPL.disks_create(context, values) def disks_update(context, values): """Update multiple disks.""" return IMPL.disks_update(context, values) def disks_delete(context, values): """Delete multiple disks.""" return IMPL.disks_delete(context, values) def disk_create(context, values): """Create a disk from the values dictionary.""" return IMPL.disk_create(context, values) def disk_update(context, disk_id, values): """Update a disk with the values dictionary.""" return IMPL.disk_update(context, disk_id, values) def disk_get(context, disk_id): """Get a disk or raise an exception if it does not exist.""" return IMPL.disk_get(context, disk_id) def disk_delete_by_storage(context, storage_id): """Delete all the disks of a device.""" return IMPL.disk_delete_by_storage(context, storage_id) def disk_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all disks. If no sort parameters are specified then the returned disks are sorted first by the 'created_at' key and then by the 'id' key in descending order.
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of disks """ return IMPL.disk_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def filesystems_create(context, values): """Create multiple filesystems.""" return IMPL.filesystems_create(context, values) def filesystems_update(context, values): """Update multiple filesystems.""" return IMPL.filesystems_update(context, values) def filesystems_delete(context, values): """Delete multiple filesystems.""" return IMPL.filesystems_delete(context, values) def filesystem_create(context, values): """Create a filesystem from the values dictionary.""" return IMPL.filesystem_create(context, values) def filesystem_update(context, filesystem_id, values): """Update a filesystem with the values dictionary.""" return IMPL.filesystem_update(context, filesystem_id, values) def filesystem_get(context, filesystem_id): """Get a filesystem or raise an exception if it does not exist.""" return IMPL.filesystem_get(context, filesystem_id) def filesystem_delete_by_storage(context, storage_id): """Delete all the filesystems of a device.""" return IMPL.filesystem_delete_by_storage(context, storage_id) def filesystem_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all filesystems. If no sort parameters are specified then the returned filesystems are sorted first by the 'created_at' key and then by the 'id' key in descending order.
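Offset paging, sketched with placeholder values (``ctx`` assumed as
above)::

    from delfin import db

    # Skip the first 100 filesystems, then return up to 20.
    fs_page = db.filesystem_get_all(ctx, limit=20, offset=100)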
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of filesystems """ return IMPL.filesystem_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def quotas_create(context, values): """Create multiple quotas.""" return IMPL.quotas_create(context, values) def quotas_update(context, values): """Update multiple quotas.""" return IMPL.quotas_update(context, values) def quotas_delete(context, values): """Delete multiple quotas.""" return IMPL.quotas_delete(context, values) def quota_create(context, values): """Create a quota from the values dictionary.""" return IMPL.quota_create(context, values) def quota_update(context, quota_id, values): """Update a quota with the values dictionary.""" return IMPL.quota_update(context, quota_id, values) def quota_get(context, quota_id): """Get a quota or raise an exception if it does not exist.""" return IMPL.quota_get(context, quota_id) def quota_delete_by_storage(context, storage_id): """Delete all the quotas of a device.""" return IMPL.quota_delete_by_storage(context, storage_id) def quota_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all quotas. If no sort parameters are specified then the returned quotas are sorted first by the 'created_at' key and then by the 'id' key in descending order.
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of quotas """ return IMPL.quota_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def qtrees_create(context, values): """Create multiple qtrees.""" return IMPL.qtrees_create(context, values) def qtrees_update(context, values): """Update multiple qtrees.""" return IMPL.qtrees_update(context, values) def qtrees_delete(context, values): """Delete multiple qtrees.""" return IMPL.qtrees_delete(context, values) def qtree_create(context, values): """Create a qtree from the values dictionary.""" return IMPL.qtree_create(context, values) def qtree_update(context, qtree_id, values): """Update a qtree with the values dictionary.""" return IMPL.qtree_update(context, qtree_id, values) def qtree_get(context, qtree_id): """Get a qtree or raise an exception if it does not exist.""" return IMPL.qtree_get(context, qtree_id) def qtree_delete_by_storage(context, storage_id): """Delete all the qtrees of a device.""" return IMPL.qtree_delete_by_storage(context, storage_id) def qtree_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all qtrees. If no sort parameters are specified then the returned qtrees are sorted first by the 'created_at' key and then by the 'id' key in descending order.
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of qtrees """ return IMPL.qtree_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def shares_create(context, values): """Create multiple shares.""" return IMPL.shares_create(context, values) def shares_update(context, values): """Update multiple shares.""" return IMPL.shares_update(context, values) def shares_delete(context, values): """Delete multiple shares.""" return IMPL.shares_delete(context, values) def share_create(context, values): """Create a share from the values dictionary.""" return IMPL.share_create(context, values) def share_update(context, share_id, values): """Update a share with the values dictionary.""" return IMPL.share_update(context, share_id, values) def share_get(context, share_id): """Get a share or raise an exception if it does not exist.""" return IMPL.share_get(context, share_id) def share_delete_by_storage(context, storage_id): """Delete all the shares of a device.""" return IMPL.share_delete_by_storage(context, storage_id) def share_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all shares. If no sort parameters are specified then the returned shares are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of shares """ return IMPL.share_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def access_info_create(context, values): """Create a storage access information that is used to connect to a specific storage device. """ return IMPL.access_info_create(context, values) def access_info_update(context, storage_id, values): """Update a storage access information with the values dictionary.""" return IMPL.access_info_update(context, storage_id, values) def access_info_get(context, storage_id): """Get a storage access information.""" return IMPL.access_info_get(context, storage_id) def access_info_delete(context, storage_id): """Delete a storage access information.""" return IMPL.access_info_delete(context, storage_id) def access_info_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all storage access information. If no sort parameters are specified then the returned access information is sorted first by the 'created_at' key and then by the 'id' key in descending order.
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of storage access information """ return IMPL.access_info_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def is_orm_value(obj): """Check if object is an ORM field.""" return IMPL.is_orm_value(obj) def alert_source_create(context, values): """Create an alert source.""" return IMPL.alert_source_create(context, values) def alert_source_update(context, storage_id, values): """Update an alert source.""" return IMPL.alert_source_update(context, storage_id, values) def alert_source_get(context, storage_id): """Get an alert source.""" return IMPL.alert_source_get(context, storage_id) def alert_source_delete(context, storage_id): """Delete an alert source.""" return IMPL.alert_source_delete(context, storage_id) def alert_source_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all alert sources. If no sort parameters are specified then the returned alert sources are sorted first by the 'created_at' key in descending order. :param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of alert sources """ return IMPL.alert_source_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def task_create(context, values): """Create a task entry from the values dictionary.""" return IMPL.task_create(context, values) def task_update(context, task_id, values): """Update a task entry with the values dictionary.""" return IMPL.task_update(context, task_id, values) def task_get(context, task_id): """Get a task or raise an exception if it does not exist.""" return IMPL.task_get(context, task_id) def task_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all tasks. If no sort parameters are specified then the returned tasks are sorted first by the 'created_at' key and then by the 'id' key in descending order.
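Illustrative only (``storage_id`` as a filter key is an assumption,
suggested by ``task_delete_by_storage`` below; ``ctx`` as above)::

    from delfin import db

    # All tasks that belong to one storage device.
    tasks = db.task_get_all(ctx, filters={'storage_id': storage_id})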
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of tasks """ return IMPL.task_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def task_delete_by_storage(context, storage_id): """Delete all tasks of given storage or raise an exception if it does not exist. """ return IMPL.task_delete_by_storage(context, storage_id) def task_delete(context, task_id): """Delete a given task or raise an exception if it does not exist. """ return IMPL.task_delete(context, task_id) def failed_task_create(context, values): """Create a failed task entry from the values dictionary.""" return IMPL.failed_task_create(context, values) def failed_task_update(context, failed_task_id, values): """Update a failed task with the values dictionary.""" return IMPL.failed_task_update(context, failed_task_id, values) def failed_task_get(context, failed_task_id): """Get a failed task or raise an exception if it does not exist.""" return IMPL.failed_task_get(context, failed_task_id) def failed_task_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all failed tasks. If no sort parameters are specified then the returned failed tasks are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of failed tasks """ return IMPL.failed_task_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def failed_task_delete_by_task_id(context, task_id): """Delete all failed tasks of given task id or raise an exception if it does not exist. """ return IMPL.failed_task_delete_by_task_id(context, task_id) def failed_task_delete(context, failed_task_id): """Delete a given failed task or raise an exception if it does not exist. """ return IMPL.failed_task_delete(context, failed_task_id) def failed_task_delete_by_storage(context, storage_id): """Delete all failed tasks of given storage or raise an exception if it does not exist. 
""" return IMPL.failed_task_delete_by_storage(context, storage_id) def storage_host_initiators_create(context, values): """Create a storage host initiator entry from the values dictionary.""" return IMPL.storage_host_initiators_create(context, values) def storage_host_initiators_update(context, values): """Update a storage host initiator with the values dictionary.""" return IMPL.storage_host_initiators_update(context, values) def storage_host_initiators_delete(context, values): """Delete multiple storage initiators.""" return IMPL.storage_host_initiators_delete(context, values) def storage_host_initiators_get(context, storage_host_initiator_id): """Get a storage host initiator or raise an exception if it does not exist. """ return IMPL.storage_host_initiators_get(context, storage_host_initiator_id) def storage_host_initiators_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all storage initiators. If no sort parameters are specified then the returned storage initiators are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of storage initiators """ return IMPL.storage_host_initiators_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def storage_host_initiators_delete_by_storage(context, storage_id): """Delete all the storage initiators of a device.""" return IMPL.storage_host_initiators_delete_by_storage(context, storage_id) def storage_hosts_create(context, values): """Create a storage host entry from the values dictionary.""" return IMPL.storage_hosts_create(context, values) def storage_hosts_update(context, values): """Update a storage host with the values dictionary.""" return IMPL.storage_hosts_update(context, values) def storage_hosts_delete(context, values): """Delete multiple storage hosts.""" return IMPL.storage_hosts_delete(context, values) def storage_hosts_get(context, storage_host_id): """Get a storage host or raise an exception if it does not exist.""" return IMPL.storage_hosts_get(context, storage_host_id) def storage_hosts_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all storage hosts. If no sort parameters are specified then the returned storage hosts are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of storage hosts """ return IMPL.storage_hosts_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def storage_hosts_delete_by_storage(context, storage_id): """Delete all the storage hosts of a device.""" return IMPL.storage_hosts_delete_by_storage(context, storage_id) def storage_host_groups_create(context, values): """Create a storage host grp entry from the values dictionary.""" return IMPL.storage_host_groups_create(context, values) def storage_host_groups_update(context, values): """Update a storage host grp with the values dictionary.""" return IMPL.storage_host_groups_update(context, values) def storage_host_groups_delete(context, values): """Delete multiple storage host groups.""" return IMPL.storage_host_groups_delete(context, values) def storage_host_groups_get(context, storage_host_grp_id): """Get a storage host group or raise an exception if it does not exist.""" return IMPL.storage_host_groups_get(context, storage_host_grp_id) def storage_host_groups_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all storage host groups. If no sort parameters are specified then the returned storage host groups are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of storage host groups """ return IMPL.storage_host_groups_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def storage_host_groups_delete_by_storage(context, storage_id): """Delete all the storage host groups of a device.""" return IMPL.storage_host_groups_delete_by_storage(context, storage_id) def port_groups_create(context, values): """Create a port group entry from the values dictionary.""" return IMPL.port_groups_create(context, values) def port_groups_update(context, values): """Update a port group with the values dictionary.""" return IMPL.port_groups_update(context, values) def port_groups_delete(context, values): """Delete multiple port groups.""" return IMPL.port_groups_delete(context, values) def port_groups_get(context, port_grp_id): """Get a port group or raise an exception if it does not exist.""" return IMPL.port_groups_get(context, port_grp_id) def port_groups_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all port groups. 
If no sort parameters are specified then the returned port groups are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of port groups """ return IMPL.port_groups_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def port_groups_delete_by_storage(context, storage_id): """Delete all the port groups of a device.""" return IMPL.port_groups_delete_by_storage(context, storage_id) def volume_groups_create(context, values): """Create a volume group entry from the values dictionary.""" return IMPL.volume_groups_create(context, values) def volume_groups_update(context, values): """Update a volume group with the values dictionary.""" return IMPL.volume_groups_update(context, values) def volume_groups_delete(context, values): """Delete multiple volume groups.""" return IMPL.volume_groups_delete(context, values) def volume_groups_get(context, volume_grp_id): """Get a volume group or raise an exception if it does not exist.""" return IMPL.volume_groups_get(context, volume_grp_id) def volume_groups_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all volume groups. If no sort parameters are specified then the returned volume groups are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
:param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of volume groups """ return IMPL.volume_groups_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def volume_groups_delete_by_storage(context, storage_id): """Delete all the volume groups of a device.""" return IMPL.volume_groups_delete_by_storage(context, storage_id) def masking_views_create(context, values): """Create a masking view entry from the values dictionary.""" return IMPL.masking_views_create(context, values) def masking_views_update(context, values): """Update a masking view with the values dictionary.""" return IMPL.masking_views_update(context, values) def masking_views_delete(context, values): """Delete multiple masking views.""" return IMPL.masking_views_delete(context, values) def masking_views_get(context, masking_view_id): """Get a masking view or raise an exception if it does not exist.""" return IMPL.masking_views_get(context, masking_view_id) def masking_views_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all masking views. If no sort parameters are specified then the returned masking views are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of masking views """ return IMPL.masking_views_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def masking_views_delete_by_storage(context, storage_id): """Delete all the masking views of a device.""" return IMPL.masking_views_delete_by_storage(context, storage_id) def storage_host_grp_host_rels_create(context, values): """Create a storage host grp host relation entry from the values dictionary. """ return IMPL.storage_host_grp_host_rels_create(context, values) def storage_host_grp_host_rels_update(context, values): """Update a storage host grp host relation with the values dictionary.""" return IMPL.storage_host_grp_host_rels_update(context, values) def storage_host_grp_host_rels_delete(context, values): """Delete multiple storage host grp host relations.""" return IMPL.storage_host_grp_host_rels_delete(context, values) def storage_host_grp_host_rels_get(context, host_grp_host_relation_id): """Get a storage host grp host relation or raise an exception if it does not exist. 
""" return IMPL.storage_host_grp_host_rels_get(context, host_grp_host_relation_id) def storage_host_grp_host_rels_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all storage host grp host relation. If no sort parameters are specified then the returned storage host grp host relations are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of storage host grp host relations """ return IMPL.storage_host_grp_host_rels_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def storage_host_grp_host_rels_delete_by_storage(context, storage_id): """Delete all the storage host grp host relations of a device.""" return IMPL.storage_host_grp_host_rels_delete_by_storage(context, storage_id) def port_grp_port_rels_create(context, values): """Create a port grp port relation entry from the values dictionary. """ return IMPL.port_grp_port_rels_create(context, values) def port_grp_port_rels_update(context, values): """Update a port grp port relation with the values dictionary.""" return IMPL.port_grp_port_rels_update(context, values) def port_grp_port_rels_delete(context, values): """Delete multiple port grp port relations.""" return IMPL.port_grp_port_rels_delete(context, values) def port_grp_port_rels_get(context, port_grp_port_relation_id): """Get a port grp port relation or raise an exception if it does not exist. """ return IMPL.port_grp_port_rels_get(context, port_grp_port_relation_id) def port_grp_port_rels_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all port grp port relation. If no sort parameters are specified then the returned port grp port relations are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of port grp port relations """ return IMPL.port_grp_port_rels_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def port_grp_port_rels_delete_by_storage(context, storage_id): """Delete all the port grp port relations of a device.""" return IMPL.port_grp_port_rels_delete_by_storage(context, storage_id) def vol_grp_vol_rels_create(context, values): """Create a volume grp volume relation entry from the values dictionary. 
""" return IMPL.vol_grp_vol_rels_create(context, values) def vol_grp_vol_rels_update(context, values): """Update a volume grp volume relation with the values dictionary.""" return IMPL.vol_grp_vol_rels_update(context, values) def vol_grp_vol_rels_delete(context, values): """Delete multiple volume grp volume relations.""" return IMPL.vol_grp_vol_rels_delete(context, values) def vol_grp_vol_rels_get(context, volume_grp_volume_relation_id): """Get a volume grp volume relation or raise an exception if it does not exist. """ return IMPL.vol_grp_vol_rels_get(context, volume_grp_volume_relation_id) def vol_grp_vol_rels_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all volume grp volume relation. If no sort parameters are specified then the returned volume grp volume relations are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context of this request, it's helpful to trace the request :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys, for example 'desc' for descending order :param filters: dictionary of filters :param offset: number of items to skip :returns: list of volume grp volume relations """ return IMPL.vol_grp_vol_rels_get_all(context, marker, limit, sort_keys, sort_dirs, filters, offset) def vol_grp_vol_rels_delete_by_storage(context, storage_id): """Delete all the volume grp volume relations of a device.""" return IMPL.vol_grp_vol_rels_delete_by_storage(context, storage_id) ================================================ FILE: delfin/db/base.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base class for classes that need modular database access.""" from oslo_config import cfg from oslo_utils import importutils db_driver_opt = cfg.StrOpt('db_driver', default='delfin.db', help='Driver to use for database access.') CONF = cfg.CONF CONF.register_opt(db_driver_opt) class Base(object): """DB driver is injected in the init method.""" def __init__(self, db_driver=None): super(Base, self).__init__() if not db_driver: db_driver = CONF.db_driver self.db = importutils.import_module(db_driver) # pylint: disable=C0103 ================================================ FILE: delfin/db/sqlalchemy/__init__.py ================================================ ================================================ FILE: delfin/db/sqlalchemy/api.py ================================================ # Copyright 2020 The SODA Authors. 
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright (c) 2014 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of SQLAlchemy backend.""" import sys import six import sqlalchemy from oslo_config import cfg from oslo_db import options as db_options from oslo_db.sqlalchemy import session from oslo_db.sqlalchemy import utils as db_utils from oslo_log import log from oslo_utils import uuidutils, timeutils from sqlalchemy import create_engine from delfin import exception from delfin.common import sqlalchemyutils from delfin.db.sqlalchemy import models from delfin.db.sqlalchemy.models import Storage, AccessInfo from delfin.i18n import _ CONF = cfg.CONF LOG = log.getLogger(__name__) _FACADE = None _DEFAULT_SQL_CONNECTION = 'sqlite:///' db_options.set_defaults(cfg.CONF, connection=_DEFAULT_SQL_CONNECTION) def apply_sorting(model, query, sort_key, sort_dir): if sort_dir.lower() not in ('desc', 'asc'): msg = (("Wrong sorting data provided: sort key is '%(sort_key)s' " "and sort order is '%(sort_dir)s'.") % {"sort_key": sort_key, "sort_dir": sort_dir}) raise exception.InvalidInput(msg) sort_attr = getattr(model, sort_key) sort_method = getattr(sort_attr, sort_dir.lower()) return query.order_by(sort_method()) def get_engine(): facade = _create_facade_lazily() return facade.get_engine() def get_session(**kwargs): facade = _create_facade_lazily() return facade.get_session(**kwargs) def _create_facade_lazily(): global _FACADE if _FACADE is None: _FACADE = session.EngineFacade.from_config(cfg.CONF) return _FACADE def get_backend(): """The backend is this module itself.""" return sys.modules[__name__] def register_db(): """Create database and tables.""" models = (Storage, AccessInfo ) engine = create_engine(CONF.database.connection, echo=False) for model in models: model.metadata.create_all(engine) def _process_model_like_filter(model, query, filters): """Applies regex expression filtering to a query. :param model: model to apply filters to :param query: query to apply filters to :param filters: dictionary of filters with regex values :returns: the updated query. 
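For illustration (hypothetical model and column): a filter of
``{'name': 'ab'}`` is applied as::

    query.filter(model.name.op('LIKE')(u'%ab%'))

i.e. a substring match on the ``name`` column.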
""" if query is None: return query for key in sorted(filters): column_attr = getattr(model, key) if 'property' == type(column_attr).__name__: continue value = filters[key] if not (isinstance(value, (six.string_types, int))): continue query = query.filter( column_attr.op('LIKE')(u'%%%s%%' % value)) return query def apply_like_filters(model): def decorator_filters(process_exact_filters): def _decorator(query, filters): exact_filters = filters.copy() regex_filters = {} for key, value in filters.items(): # NOTE(tommylikehu): For inexact match, the filter keys # are in the format of 'key~=value' if key.endswith('~'): exact_filters.pop(key) regex_filters[key.rstrip('~')] = value query = process_exact_filters(query, exact_filters) return _process_model_like_filter(model, query, regex_filters) return _decorator return decorator_filters def is_valid_model_filters(model, filters, exclude_list=None): """Return True if filter values exist on the model :param model: a Delfin model :param filters: dictionary of filters """ for key in filters.keys(): if exclude_list and key in exclude_list: continue if key == 'metadata': if not isinstance(filters[key], dict): LOG.debug("Metadata filter value is not valid dictionary") return False continue try: key = key.rstrip('~') getattr(model, key) except AttributeError: LOG.debug("'%s' filter key is not valid.", key) return False return True def access_info_create(context, values): """Create a storage access information.""" if not values.get('storage_id'): values['storage_id'] = uuidutils.generate_uuid() access_info_ref = models.AccessInfo() access_info_ref.update(values) session = get_session() with session.begin(): session.add(access_info_ref) return _access_info_get(context, access_info_ref['storage_id'], session=session) def access_info_update(context, storage_id, values): """Update a storage access information with the values dictionary.""" session = get_session() with session.begin(): _access_info_get(context, storage_id, session).update(values) return _access_info_get(context, storage_id, session) def access_info_delete(context, storage_id): """Delete a storage access information.""" _access_info_get_query(context). 
\ filter_by(storage_id=storage_id).delete() def access_info_get(context, storage_id): """Get a storage access information.""" return _access_info_get(context, storage_id) def _access_info_get(context, storage_id, session=None): result = (_access_info_get_query(context, session=session) .filter_by(storage_id=storage_id) .first()) if not result: raise exception.AccessInfoNotFound(storage_id) return result def _access_info_get_query(context, session=None): return model_query(context, models.AccessInfo, session=session) def access_info_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all storage access information.""" session = get_session() with session.begin(): query = _generate_paginate_query(context, session, models.AccessInfo, marker, limit, sort_keys, sort_dirs, filters, offset, ) if query is None: return [] return query.all() @apply_like_filters(model=models.AccessInfo) def _process_access_info_filters(query, filters): """Common filter processing for AccessInfo queries.""" if filters: if not is_valid_model_filters(models.AccessInfo, filters): return query = query.filter_by(**filters) return query def storage_create(context, values): """Add a storage device from the values dictionary.""" if not values.get('id'): values['id'] = uuidutils.generate_uuid() storage_ref = models.Storage() storage_ref.update(values) session = get_session() with session.begin(): session.add(storage_ref) return _storage_get(context, storage_ref['id'], session=session) def storage_update(context, storage_id, values): """Update a storage device with the values dictionary.""" session = get_session() with session.begin(): query = _storage_get_query(context, session) result = query.filter_by(id=storage_id).update(values) return result def storage_get(context, storage_id): """Retrieve a storage device.""" return _storage_get(context, storage_id) def _storage_get(context, storage_id, session=None): result = (_storage_get_query(context, session=session) .filter_by(id=storage_id) .first()) if not result: raise exception.StorageNotFound(storage_id) return result def _storage_get_query(context, session=None): read_deleted = context.read_deleted kwargs = dict() if read_deleted in ('no', 'n', False): kwargs['deleted'] = False elif read_deleted in ('yes', 'y', True): kwargs['deleted'] = True return model_query(context, models.Storage, session=session, **kwargs) def storage_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): session = get_session() with session.begin(): # Generate the query query = _generate_paginate_query(context, session, models.Storage, marker, limit, sort_keys, sort_dirs, filters, offset, ) # No storages match, return empty list if query is None: return [] return query.all() @apply_like_filters(model=models.Storage) def _process_storage_info_filters(query, filters): """Common filter processing for Storages queries.""" if filters: if not is_valid_model_filters(models.Storage, filters): return query = query.filter_by(**filters) return query def storage_delete(context, storage_id): """Delete a storage device.""" delete_info = {'deleted': True, 'deleted_at': timeutils.utcnow()} _storage_get_query(context).filter_by(id=storage_id).update(delete_info) def _volume_get_query(context, session=None): return model_query(context, models.Volume, session=session) def _volume_get(context, volume_id, session=None): result = (_volume_get_query(context, session=session) .filter_by(id=volume_id) .first()) if not 
result: raise exception.VolumeNotFound(volume_id) return result def volume_create(context, values): """Create a volume.""" if not values.get('id'): values['id'] = uuidutils.generate_uuid() vol_ref = models.Volume() vol_ref.update(values) session = get_session() with session.begin(): session.add(vol_ref) return _volume_get(context, vol_ref['id'], session=session) def volumes_create(context, volumes): """Create multiple volumes.""" session = get_session() vol_refs = [] with session.begin(): for vol in volumes: LOG.debug('adding new volume for native_volume_id {0}:' .format(vol.get('native_volume_id'))) if not vol.get('id'): vol['id'] = uuidutils.generate_uuid() vol_ref = models.Volume() vol_ref.update(vol) vol_refs.append(vol_ref) session.add_all(vol_refs) return vol_refs def volumes_delete(context, volumes_id_list): """Delete multiple volumes.""" session = get_session() with session.begin(): for vol_id in volumes_id_list: LOG.debug('deleting volume {0}:'.format(vol_id)) query = _volume_get_query(context, session) result = query.filter_by(id=vol_id).delete() if not result: LOG.error(exception.VolumeNotFound(vol_id)) return def volume_update(context, vol_id, values): """Update a volume.""" session = get_session() with session.begin(): _volume_get(context, vol_id, session).update(values) return _volume_get(context, vol_id, session) def volumes_update(context, volumes): """Update multiple volumes.""" session = get_session() with session.begin(): for vol in volumes: LOG.debug('updating volume {0}:'.format(vol.get('id'))) query = _volume_get_query(context, session) result = query.filter_by(id=vol.get('id') ).update(vol) if not result: LOG.error(exception.VolumeNotFound(vol.get('id'))) def volume_get(context, volume_id): """Get a volume or raise an exception if it does not exist.""" return _volume_get(context, volume_id) def volume_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all storage volumes.""" session = get_session() with session.begin(): # Generate the query query = _generate_paginate_query(context, session, models.Volume, marker, limit, sort_keys, sort_dirs, filters, offset) # No volume would match, return empty list if query is None: return [] return query.all() @apply_like_filters(model=models.Volume) def _process_volume_info_filters(query, filters): """Common filter processing for volumes queries.""" if filters: if not is_valid_model_filters(models.Volume, filters): return query = query.filter_by(**filters) return query def volume_delete_by_storage(context, storage_id): """Delete all the volumes of a device.""" _volume_get_query(context).filter_by(storage_id=storage_id).delete() def _storage_pool_get_query(context, session=None): return model_query(context, models.StoragePool, session=session) def _storage_pool_get(context, storage_pool_id, session=None): result = (_storage_pool_get_query(context, session=session) .filter_by(id=storage_pool_id) .first()) if not result: raise exception.StoragePoolNotFound(storage_pool_id) return result def storage_pool_create(context, values): """Create a storage_pool from the values dictionary.""" if not values.get('id'): values['id'] = uuidutils.generate_uuid() storage_pool_ref = models.StoragePool() storage_pool_ref.update(values) session = get_session() with session.begin(): session.add(storage_pool_ref) return _storage_pool_get(context, storage_pool_ref['id'], session=session) def storage_pools_create(context, storage_pools): """Create multiple storage_pools."""
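    # NOTE (editorial, descriptive only): same bulk-create pattern as
    # volumes_create above -- build every ORM ref first, then
    # session.add_all() inside one 'with session.begin()' block so the
    # whole batch commits (or rolls back) atomically.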
session = get_session() storage_pool_refs = [] with session.begin(): for storage_pool in storage_pools: LOG.debug('adding new storage_pool for native_storage_pool_id {0}:' .format(storage_pool.get('native_storage_pool_id'))) if not storage_pool.get('id'): storage_pool['id'] = uuidutils.generate_uuid() storage_pool_ref = models.StoragePool() storage_pool_ref.update(storage_pool) storage_pool_refs.append(storage_pool_ref) session.add_all(storage_pool_refs) return storage_pool_refs def storage_pools_delete(context, storage_pools_id_list): """Delete multiple storage_pools given a list of storage_pool ids.""" session = get_session() with session.begin(): for storage_pool_id in storage_pools_id_list: LOG.debug('deleting storage_pool {0}:'.format(storage_pool_id)) query = _storage_pool_get_query(context, session) result = query.filter_by(id=storage_pool_id).delete() if not result: LOG.error(exception.StoragePoolNotFound(storage_pool_id)) return def storage_pool_update(context, storage_pool_id, values): """Update a storage_pool with the values dictionary.""" session = get_session() with session.begin(): query = _storage_pool_get_query(context, session) result = query.filter_by(id=storage_pool_id).update(values) if not result: raise exception.StoragePoolNotFound(storage_pool_id) return result def storage_pools_update(context, storage_pools): """Update multiple storage_pools with the storage_pools dictionary.""" session = get_session() with session.begin(): storage_pool_refs = [] for storage_pool in storage_pools: LOG.debug('updating storage_pool {0}:'.format( storage_pool.get('id'))) query = _storage_pool_get_query(context, session) result = query.filter_by(id=storage_pool.get('id') ).update(storage_pool) if not result: LOG.error(exception.StoragePoolNotFound(storage_pool.get( 'id'))) else: storage_pool_refs.append(result) return storage_pool_refs def storage_pool_get(context, storage_pool_id): """Get a storage_pool or raise an exception if it does not exist.""" return _storage_pool_get(context, storage_pool_id) def storage_pool_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all storage_pools.""" session = get_session() with session.begin(): # Generate the query query = _generate_paginate_query(context, session, models.StoragePool, marker, limit, sort_keys, sort_dirs, filters, offset, ) # No storage_pool would match, return empty list if query is None: return [] return query.all() def storage_pool_delete_by_storage(context, storage_id): """Delete all the storage_pools of a storage device.""" _storage_pool_get_query(context).filter_by(storage_id=storage_id).delete() @apply_like_filters(model=models.StoragePool) def _process_storage_pool_info_filters(query, filters): """Common filter processing for storage_pools queries.""" if filters: if not is_valid_model_filters(models.StoragePool, filters): return query = query.filter_by(**filters) return query def controllers_create(context, controllers): """Create multiple controllers.""" session = get_session() controllers_refs = [] with session.begin(): for controller in controllers: LOG.debug('adding new controller for native_controller_id {0}:' .format(controller.get('native_controller_id'))) if not controller.get('id'): controller['id'] = uuidutils.generate_uuid() controller_ref = models.Controller() controller_ref.update(controller) controllers_refs.append(controller_ref) session.add_all(controllers_refs) return controllers_refs def controllers_update(context, controllers):
"""Update multiple controllers.""" session = get_session() with session.begin(): controller_refs = [] for controller in controllers: LOG.debug('updating controller {0}:'.format( controller.get('id'))) query = _controller_get_query(context, session) result = query.filter_by(id=controller.get('id') ).update(controller) if not result: LOG.error(exception.ControllerNotFound(controller.get( 'id'))) else: controller_refs.append(result) return controller_refs def controllers_delete(context, controllers_id_list): """Delete multiple controllers.""" session = get_session() with session.begin(): for controller_id in controllers_id_list: LOG.debug('deleting controller {0}:'.format(controller_id)) query = _controller_get_query(context, session) result = query.filter_by(id=controller_id).delete() if not result: LOG.error(exception.ControllerNotFound(controller_id)) return def _controller_get_query(context, session=None): return model_query(context, models.Controller, session=session) def _controller_get(context, controller_id, session=None): result = (_controller_get_query(context, session=session) .filter_by(id=controller_id) .first()) if not result: raise exception.ControllerNotFound(controller_id) return result def controller_create(context, values): """Create a controller from the values dictionary.""" if not values.get('id'): values['id'] = uuidutils.generate_uuid() controller_ref = models.Controller() controller_ref.update(values) session = get_session() with session.begin(): session.add(controller_ref) return _controller_get(context, controller_ref['id'], session=session) def controller_update(context, controller_id, values): """Update a controller with the values dictionary.""" session = get_session() with session.begin(): query = _controller_get_query(context, session) result = query.filter_by(id=controller_id).update(values) if not result: raise exception.ControllerNotFound(controller_id) return result def controller_get(context, controller_id): """Get a controller or raise an exception if it does not exist.""" return _controller_get(context, controller_id) def controller_delete_by_storage(context, storage_id): """Delete a controller or raise an exception if it does not exist.""" _controller_get_query(context).filter_by(storage_id=storage_id).delete() def controller_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all controllers.""" session = get_session() with session.begin(): # Generate the query query = _generate_paginate_query(context, session, models.Controller, marker, limit, sort_keys, sort_dirs, filters, offset, ) # No Controller would match, return empty list if query is None: return [] return query.all() @apply_like_filters(model=models.Controller) def _process_controller_info_filters(query, filters): """Common filter processing for controllers queries.""" if filters: if not is_valid_model_filters(models.Controller, filters): return query = query.filter_by(**filters) return query def ports_create(context, ports): """Create multiple ports.""" session = get_session() ports_refs = [] with session.begin(): for port in ports: LOG.debug('adding new port for native_port_id {0}:' .format(port.get('native_port_id'))) if not port.get('id'): port['id'] = uuidutils.generate_uuid() port_ref = models.Port() port_ref.update(port) ports_refs.append(port_ref) session.add_all(ports_refs) return ports_refs def ports_update(context, ports): """Update multiple ports.""" session = get_session() with session.begin(): port_refs = [] for port 
def ports_update(context, ports):
    """Update multiple ports."""
    session = get_session()
    with session.begin():
        port_refs = []
        for port in ports:
            LOG.debug('updating port {0}:'.format(port.get('id')))
            query = _port_get_query(context, session)
            result = query.filter_by(id=port.get('id')).update(port)
            if not result:
                LOG.error(exception.PortNotFound(port.get('id')))
            else:
                port_refs.append(result)
    return port_refs


def ports_delete(context, ports_id_list):
    """Delete multiple ports."""
    session = get_session()
    with session.begin():
        for port_id in ports_id_list:
            LOG.debug('deleting port {0}:'.format(port_id))
            query = _port_get_query(context, session)
            result = query.filter_by(id=port_id).delete()
            if not result:
                LOG.error(exception.PortNotFound(port_id))
    return


def _port_get_query(context, session=None):
    return model_query(context, models.Port, session=session)


def _port_get(context, port_id, session=None):
    result = (_port_get_query(context, session=session)
              .filter_by(id=port_id)
              .first())
    if not result:
        raise exception.PortNotFound(port_id)
    return result


def port_create(context, values):
    """Create a port from the values dictionary."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    port_ref = models.Port()
    port_ref.update(values)
    session = get_session()
    with session.begin():
        session.add(port_ref)
    return _port_get(context, port_ref['id'], session=session)


def port_update(context, port_id, values):
    """Update a port with the values dictionary."""
    session = get_session()
    with session.begin():
        query = _port_get_query(context, session)
        result = query.filter_by(id=port_id).update(values)
        if not result:
            raise exception.PortNotFound(port_id)
    return result


def port_get(context, port_id):
    """Get a port or raise an exception if it does not exist."""
    return _port_get(context, port_id)


def port_delete_by_storage(context, storage_id):
    """Delete all the ports of a device"""
    _port_get_query(context).filter_by(storage_id=storage_id).delete()


def port_get_all(context, marker=None, limit=None, sort_keys=None,
                 sort_dirs=None, filters=None, offset=None):
    """Retrieves all ports."""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session, models.Port,
                                         marker, limit, sort_keys,
                                         sort_dirs, filters, offset)
        # No Port would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.Port)
def _process_port_info_filters(query, filters):
    """Common filter processing for ports queries."""
    if filters:
        if not is_valid_model_filters(models.Port, filters):
            return
        query = query.filter_by(**filters)
    return query


def disks_create(context, disks):
    """Create multiple disks."""
    session = get_session()
    disks_refs = []
    with session.begin():
        for disk in disks:
            LOG.debug('adding new disk for native_disk_id {0}:'
                      .format(disk.get('native_disk_id')))
            if not disk.get('id'):
                disk['id'] = uuidutils.generate_uuid()
            disk_ref = models.Disk()
            disk_ref.update(disk)
            disks_refs.append(disk_ref)
        session.add_all(disks_refs)
    return disks_refs


def disks_update(context, disks):
    """Update multiple disks."""
    session = get_session()
    with session.begin():
        disk_refs = []
        for disk in disks:
            LOG.debug('updating disk {0}:'.format(disk.get('id')))
            query = _disk_get_query(context, session)
            result = query.filter_by(id=disk.get('id')).update(disk)
            if not result:
                LOG.error(exception.DiskNotFound(disk.get('id')))
            else:
                disk_refs.append(result)
    return disk_refs


def disks_delete(context, disks_id_list):
    """Delete multiple disks."""
    session = get_session()
    with session.begin():
        for disk_id in disks_id_list:
            LOG.debug('deleting disk {0}:'.format(disk_id))
            query = _disk_get_query(context, session)
            result = query.filter_by(id=disk_id).delete()
            if not result:
                LOG.error(exception.DiskNotFound(disk_id))
    return


def _disk_get_query(context, session=None):
    return model_query(context, models.Disk, session=session)


def _disk_get(context, disk_id, session=None):
    result = (_disk_get_query(context, session=session)
              .filter_by(id=disk_id)
              .first())
    if not result:
        raise exception.DiskNotFound(disk_id)
    return result


def disk_create(context, values):
    """Create a disk from the values dictionary."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    disk_ref = models.Disk()
    disk_ref.update(values)
    session = get_session()
    with session.begin():
        session.add(disk_ref)
    return _disk_get(context, disk_ref['id'], session=session)


def disk_update(context, disk_id, values):
    """Update a disk with the values dictionary."""
    session = get_session()
    with session.begin():
        query = _disk_get_query(context, session)
        result = query.filter_by(id=disk_id).update(values)
        if not result:
            raise exception.DiskNotFound(disk_id)
    return result


def disk_get(context, disk_id):
    """Get a disk or raise an exception if it does not exist."""
    return _disk_get(context, disk_id)


def disk_delete_by_storage(context, storage_id):
    """Delete all the disks of a device"""
    _disk_get_query(context).filter_by(storage_id=storage_id).delete()


def disk_get_all(context, marker=None, limit=None, sort_keys=None,
                 sort_dirs=None, filters=None, offset=None):
    """Retrieves all disks."""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session, models.Disk,
                                         marker, limit, sort_keys,
                                         sort_dirs, filters, offset)
        # No Disk would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.Disk)
def _process_disk_info_filters(query, filters):
    """Common filter processing for disks queries."""
    if filters:
        if not is_valid_model_filters(models.Disk, filters):
            return
        query = query.filter_by(**filters)
    return query


def filesystems_create(context, filesystems):
    """Create multiple filesystems."""
    session = get_session()
    filesystems_refs = []
    with session.begin():
        for filesystem in filesystems:
            LOG.debug('adding new filesystem for native_filesystem_id {0}:'
                      .format(filesystem.get('native_filesystem_id')))
            if not filesystem.get('id'):
                filesystem['id'] = uuidutils.generate_uuid()
            filesystem_ref = models.Filesystem()
            filesystem_ref.update(filesystem)
            filesystems_refs.append(filesystem_ref)
        session.add_all(filesystems_refs)
    return filesystems_refs


def filesystems_update(context, filesystems):
    """Update multiple filesystems."""
    session = get_session()
    with session.begin():
        filesystem_refs = []
        for filesystem in filesystems:
            LOG.debug('updating filesystem {0}:'.format(
                filesystem.get('id')))
            query = _filesystem_get_query(context, session)
            result = query.filter_by(
                id=filesystem.get('id')).update(filesystem)
            if not result:
                LOG.error(exception.FilesystemNotFound(
                    filesystem.get('id')))
            else:
                filesystem_refs.append(result)
    return filesystem_refs


def filesystems_delete(context, filesystems_id_list):
    """Delete multiple filesystems."""
    session = get_session()
    with session.begin():
        for filesystem_id in filesystems_id_list:
            LOG.debug('deleting filesystem {0}:'.format(filesystem_id))
            query = _filesystem_get_query(context, session)
            result = query.filter_by(id=filesystem_id).delete()
            if not result:
                LOG.error(exception.FilesystemNotFound(filesystem_id))
    return


def _filesystem_get_query(context, session=None):
    return model_query(context, models.Filesystem, session=session)


def _filesystem_get(context, filesystem_id, session=None):
    result = (_filesystem_get_query(context, session=session)
              .filter_by(id=filesystem_id)
              .first())
    if not result:
        raise exception.FilesystemNotFound(filesystem_id)
    return result


def filesystem_create(context, values):
    """Create a filesystem from the values dictionary."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    filesystem_ref = models.Filesystem()
    filesystem_ref.update(values)
    session = get_session()
    with session.begin():
        session.add(filesystem_ref)
    return _filesystem_get(context, filesystem_ref['id'], session=session)


def filesystem_update(context, filesystem_id, values):
    """Update a filesystem with the values dictionary."""
    session = get_session()
    with session.begin():
        query = _filesystem_get_query(context, session)
        result = query.filter_by(id=filesystem_id).update(values)
        if not result:
            raise exception.FilesystemNotFound(filesystem_id)
    return result


def filesystem_get(context, filesystem_id):
    """Get a filesystem or raise an exception if it does not exist."""
    return _filesystem_get(context, filesystem_id)


def filesystem_delete_by_storage(context, storage_id):
    """Delete all the filesystems of a device"""
    _filesystem_get_query(context).filter_by(storage_id=storage_id).delete()


def filesystem_get_all(context, marker=None, limit=None, sort_keys=None,
                       sort_dirs=None, filters=None, offset=None):
    """Retrieves all filesystems."""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session,
                                         models.Filesystem, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset)
        # No Filesystem would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.Filesystem)
def _process_filesystem_info_filters(query, filters):
    """Common filter processing for filesystems queries."""
    if filters:
        if not is_valid_model_filters(models.Filesystem, filters):
            return
        query = query.filter_by(**filters)
    return query


def quotas_create(context, quotas):
    """Create multiple quotas."""
    session = get_session()
    quotas_refs = []
    with session.begin():
        for quota in quotas:
            LOG.debug('adding new quota for native_quota_id {0}:'
                      .format(quota.get('native_quota_id')))
            if not quota.get('id'):
                quota['id'] = uuidutils.generate_uuid()
            quota_ref = models.Quota()
            quota_ref.update(quota)
            quotas_refs.append(quota_ref)
        session.add_all(quotas_refs)
    return quotas_refs


def quotas_update(context, quotas):
    """Update multiple quotas."""
    session = get_session()
    with session.begin():
        quota_refs = []
        for quota in quotas:
            LOG.debug('updating quota {0}:'.format(quota.get('id')))
            query = _quota_get_query(context, session)
            result = query.filter_by(id=quota.get('id')).update(quota)
            if not result:
                LOG.error(exception.QuotaNotFound(quota.get('id')))
            else:
                quota_refs.append(result)
    return quota_refs


def quotas_delete(context, quotas_id_list):
    """Delete multiple quotas."""
    session = get_session()
    with session.begin():
        for quota_id in quotas_id_list:
            LOG.debug('deleting quota {0}:'.format(quota_id))
            query = _quota_get_query(context, session)
            result = query.filter_by(id=quota_id).delete()
            if not result:
                LOG.error(exception.QuotaNotFound(quota_id))
    return


def _quota_get_query(context, session=None):
    return model_query(context, models.Quota, session=session)


def _quota_get(context, quota_id, session=None):
    result = (_quota_get_query(context, session=session)
              .filter_by(id=quota_id)
              .first())
    if not result:
        raise exception.QuotaNotFound(quota_id)
    return result


def quota_create(context, values):
    """Create a quota from the values dictionary."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    quota_ref = models.Quota()
    quota_ref.update(values)
    session = get_session()
    with session.begin():
        session.add(quota_ref)
    return _quota_get(context, quota_ref['id'], session=session)


def quota_update(context, quota_id, values):
    """Update a quota with the values dictionary."""
    session = get_session()
    with session.begin():
        query = _quota_get_query(context, session)
        result = query.filter_by(id=quota_id).update(values)
        if not result:
            raise exception.QuotaNotFound(quota_id)
    return result


def quota_get(context, quota_id):
    """Get a quota or raise an exception if it does not exist."""
    return _quota_get(context, quota_id)


def quota_delete_by_storage(context, storage_id):
    """Delete all the quotas of a device"""
    _quota_get_query(context).filter_by(storage_id=storage_id).delete()


def quota_get_all(context, marker=None, limit=None, sort_keys=None,
                  sort_dirs=None, filters=None, offset=None):
    """Retrieves all quotas."""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session, models.Quota,
                                         marker, limit, sort_keys,
                                         sort_dirs, filters, offset)
        # No Quota would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.Quota)
def _process_quota_info_filters(query, filters):
    """Common filter processing for quotas queries."""
    if filters:
        if not is_valid_model_filters(models.Quota, filters):
            return
        query = query.filter_by(**filters)
    return query


def qtrees_create(context, qtrees):
    """Create multiple qtrees."""
    session = get_session()
    qtrees_refs = []
    with session.begin():
        for qtree in qtrees:
            LOG.debug('adding new qtree for native_qtree_id {0}:'
                      .format(qtree.get('native_qtree_id')))
            if not qtree.get('id'):
                qtree['id'] = uuidutils.generate_uuid()
            qtree_ref = models.Qtree()
            qtree_ref.update(qtree)
            qtrees_refs.append(qtree_ref)
        session.add_all(qtrees_refs)
    return qtrees_refs


def qtrees_update(context, qtrees):
    """Update multiple qtrees."""
    session = get_session()
    with session.begin():
        qtree_refs = []
        for qtree in qtrees:
            LOG.debug('updating qtree {0}:'.format(qtree.get('id')))
            query = _qtree_get_query(context, session)
            result = query.filter_by(id=qtree.get('id')).update(qtree)
            if not result:
                LOG.error(exception.QtreeNotFound(qtree.get('id')))
            else:
                qtree_refs.append(result)
    return qtree_refs


def qtrees_delete(context, qtrees_id_list):
    """Delete multiple qtrees."""
    session = get_session()
    with session.begin():
        for qtree_id in qtrees_id_list:
            LOG.debug('deleting qtree {0}:'.format(qtree_id))
            query = _qtree_get_query(context, session)
            result = query.filter_by(id=qtree_id).delete()
            if not result:
                LOG.error(exception.QtreeNotFound(qtree_id))
    return


def _qtree_get_query(context, session=None):
    return model_query(context, models.Qtree, session=session)


def _qtree_get(context, qtree_id, session=None):
    result = (_qtree_get_query(context, session=session)
              .filter_by(id=qtree_id)
              .first())
    if not result:
        raise exception.QtreeNotFound(qtree_id)
    return result


def qtree_create(context, values):
    """Create a qtree from the values dictionary."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    qtree_ref = models.Qtree()
    qtree_ref.update(values)
    session = get_session()
    with session.begin():
        session.add(qtree_ref)
    return _qtree_get(context, qtree_ref['id'], session=session)
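# Illustrative note, not part of the original module: the bulk *_delete
# helpers, e.g. qtrees_delete() above, only log a *NotFound exception object
# for ids that match no row and continue with the rest of the batch, so a
# partially stale id list does not abort the delete:
#
#   qtrees_delete(ctx, [known_id, already_removed_id])   # does not raise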
def qtree_update(context, qtree_id, values):
    """Update a qtree with the values dictionary."""
    session = get_session()
    with session.begin():
        query = _qtree_get_query(context, session)
        result = query.filter_by(id=qtree_id).update(values)
        if not result:
            raise exception.QtreeNotFound(qtree_id)
    return result


def qtree_get(context, qtree_id):
    """Get a qtree or raise an exception if it does not exist."""
    return _qtree_get(context, qtree_id)


def qtree_delete_by_storage(context, storage_id):
    """Delete all the qtrees of a device"""
    _qtree_get_query(context).filter_by(storage_id=storage_id).delete()


def qtree_get_all(context, marker=None, limit=None, sort_keys=None,
                  sort_dirs=None, filters=None, offset=None):
    """Retrieves all qtrees."""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session, models.Qtree,
                                         marker, limit, sort_keys,
                                         sort_dirs, filters, offset)
        # No Qtree would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.Qtree)
def _process_qtree_info_filters(query, filters):
    """Common filter processing for qtrees queries."""
    if filters:
        if not is_valid_model_filters(models.Qtree, filters):
            return
        query = query.filter_by(**filters)
    return query


def shares_create(context, shares):
    """Create multiple shares."""
    session = get_session()
    shares_refs = []
    with session.begin():
        for share in shares:
            LOG.debug('adding new share for native_share_id {0}:'
                      .format(share.get('native_share_id')))
            if not share.get('id'):
                share['id'] = uuidutils.generate_uuid()
            share_ref = models.Share()
            share_ref.update(share)
            shares_refs.append(share_ref)
        session.add_all(shares_refs)
    return shares_refs


def shares_update(context, shares):
    """Update multiple shares."""
    session = get_session()
    with session.begin():
        share_refs = []
        for share in shares:
            LOG.debug('updating share {0}:'.format(share.get('id')))
            query = _share_get_query(context, session)
            result = query.filter_by(id=share.get('id')).update(share)
            if not result:
                LOG.error(exception.ShareNotFound(share.get('id')))
            else:
                share_refs.append(result)
    return share_refs


def shares_delete(context, shares_id_list):
    """Delete multiple shares."""
    session = get_session()
    with session.begin():
        for share_id in shares_id_list:
            LOG.debug('deleting share {0}:'.format(share_id))
            query = _share_get_query(context, session)
            result = query.filter_by(id=share_id).delete()
            if not result:
                LOG.error(exception.ShareNotFound(share_id))
    return


def _share_get_query(context, session=None):
    return model_query(context, models.Share, session=session)


def _share_get(context, share_id, session=None):
    result = (_share_get_query(context, session=session)
              .filter_by(id=share_id)
              .first())
    if not result:
        raise exception.ShareNotFound(share_id)
    return result


def share_create(context, values):
    """Create a share from the values dictionary."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    share_ref = models.Share()
    share_ref.update(values)
    session = get_session()
    with session.begin():
        session.add(share_ref)
    return _share_get(context, share_ref['id'], session=session)


def share_update(context, share_id, values):
    """Update a share with the values dictionary."""
    session = get_session()
    with session.begin():
        query = _share_get_query(context, session)
        result = query.filter_by(id=share_id).update(values)
        if not result:
            raise exception.ShareNotFound(share_id)
    return result


def share_get(context, share_id):
    """Get a share or raise an exception if it does not exist."""
    return _share_get(context, share_id)


def share_delete_by_storage(context, storage_id):
    """Delete all the shares of a device"""
    _share_get_query(context).filter_by(storage_id=storage_id).delete()


def share_get_all(context, marker=None, limit=None, sort_keys=None,
                  sort_dirs=None, filters=None, offset=None):
    """Retrieves all shares."""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session, models.Share,
                                         marker, limit, sort_keys,
                                         sort_dirs, filters, offset)
        # No Share would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.Share)
def _process_share_info_filters(query, filters):
    """Common filter processing for shares queries."""
    if filters:
        if not is_valid_model_filters(models.Share, filters):
            return
        query = query.filter_by(**filters)
    return query


def is_orm_value(obj):
    """Check if object is an ORM field or expression."""
    return isinstance(obj,
                      (sqlalchemy.orm.attributes.InstrumentedAttribute,
                       sqlalchemy.sql.expression.ColumnElement))


def model_query(context, model, *args, **kwargs):
    """Query helper for model queries.

    :param context: context to query under
    :param model: model to query. Must be a subclass of ModelBase.
    :param session: if present, the session to use
    """
    # Default to None so a missing 'session' kwarg falls back to a fresh
    # session instead of raising KeyError.
    session = kwargs.pop('session', None) or get_session()
    return db_utils.model_query(
        model=model, session=session, args=args, **kwargs)


def alert_source_get(context, storage_id):
    """Get an alert source or raise an exception if it does not exist."""
    return _alert_source_get(context, storage_id)


def _alert_source_get(context, storage_id, session=None):
    result = (_alert_source_get_query(context, session=session)
              .filter_by(storage_id=storage_id)
              .first())
    if not result:
        raise exception.AlertSourceNotFound(storage_id)
    return result


def _alert_source_get_query(context, session=None):
    return model_query(context, models.AlertSource, session=session)


@apply_like_filters(model=models.AlertSource)
def _process_alert_source_filters(query, filters):
    """Common filter processing for alert source queries."""
    if filters:
        if not is_valid_model_filters(models.AlertSource, filters):
            return
        query = query.filter_by(**filters)
    return query


def alert_source_create(context, values):
    """Add an alert source configuration."""
    alert_source_ref = models.AlertSource()
    alert_source_ref.update(values)
    session = get_session()
    with session.begin():
        session.add(alert_source_ref)
    return _alert_source_get(context, alert_source_ref['storage_id'],
                             session=session)


def alert_source_update(context, storage_id, values):
    """Update an alert source configuration."""
    session = get_session()
    with session.begin():
        _alert_source_get(context, storage_id, session).update(values)
    return _alert_source_get(context, storage_id, session)


def alert_source_delete(context, storage_id):
    session = get_session()
    with session.begin():
        query = _alert_source_get_query(context, session)
        result = query.filter_by(storage_id=storage_id).delete()
        if not result:
            LOG.error("Cannot delete non-existent alert "
                      "source[storage_id=%s]." % storage_id)
            raise exception.AlertSourceNotFound(storage_id)
        else:
            LOG.info("Delete alert source[storage_id=%s] successfully."
                     % storage_id)
def alert_source_get_all(context, marker=None, limit=None, sort_keys=None,
                         sort_dirs=None, filters=None, offset=None):
    session = get_session()
    with session.begin():
        query = _generate_paginate_query(context, session,
                                         models.AlertSource, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset)
        if query is None:
            return []
        return query.all()


def task_create(context, values):
    """Add task configuration."""
    tasks_ref = models.Task()
    tasks_ref.update(values)
    session = get_session()
    with session.begin():
        session.add(tasks_ref)
    return _task_get(context, tasks_ref['id'], session=session)


def task_update(context, tasks_id, values):
    """Update a task's attributes with the values dictionary."""
    session = get_session()
    with session.begin():
        query = _task_get_query(context, session)
        result = query.filter_by(id=tasks_id).update(values)
        if not result:
            raise exception.TaskNotFound(tasks_id)
    return result


def _task_get(context, task_id, session=None):
    result = (_task_get_query(context, session=session)
              .filter_by(id=task_id)
              .first())
    if not result:
        raise exception.TaskNotFound(task_id)
    return result


def _task_get_query(context, session=None):
    return model_query(context, models.Task, session=session)


def task_get(context, tasks_id):
    """Get a task or raise an exception if it does not exist."""
    return _task_get(context, tasks_id)


def task_delete_by_storage(context, storage_id):
    """Delete all the tasks of a storage device"""
    delete_info = {'deleted': True, 'deleted_at': timeutils.utcnow()}
    _task_get_query(context).filter_by(
        storage_id=storage_id).update(delete_info)


def task_delete(context, tasks_id):
    """Delete a given task"""
    _task_get_query(context).filter_by(id=tasks_id).delete()


def task_get_all(context, marker=None, limit=None, sort_keys=None,
                 sort_dirs=None, filters=None, offset=None):
    """Retrieves all tasks of a storage."""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session, models.Task,
                                         marker, limit, sort_keys,
                                         sort_dirs, filters, offset)
        # No task entry would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.Task)
def _process_tasks_info_filters(query, filters):
    """Common filter processing for task table queries."""
    if filters:
        if not is_valid_model_filters(models.Task, filters):
            return
        query = query.filter_by(**filters)
    return query


def failed_task_create(context, values):
    """Add failed task configuration."""
    failed_task_ref = models.FailedTask()
    failed_task_ref.update(values)
    session = get_session()
    with session.begin():
        session.add(failed_task_ref)
    return _failed_tasks_get(context, failed_task_ref['id'],
                             session=session)


def failed_task_update(context, failed_task_id, values):
    """Update a failed task with the values dictionary."""
    session = get_session()
    with session.begin():
        query = _failed_tasks_get_query(context, session)
        result = query.filter_by(id=failed_task_id).update(values)
        if not result:
            raise exception.FailedTaskNotFound(failed_task_id)
    return result


def _failed_tasks_get(context, failed_task_id, session=None):
    result = (_failed_tasks_get_query(context, session=session)
              .filter_by(id=failed_task_id)
              .first())
    if not result:
        raise exception.FailedTaskNotFound(failed_task_id)
    return result


def _failed_tasks_get_query(context, session=None):
    return model_query(context, models.FailedTask, session=session)


def failed_task_get(context, failed_task_id):
    """Get a failed task or raise an exception if it does not exist."""
    return _failed_tasks_get(context, failed_task_id)
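# Illustrative note, not part of the original module: task history is kept
# when a device is removed -- task_delete_by_storage() above and
# failed_task_delete_by_storage() below soft-delete rows via UPDATE:
#
#   delete_info = {'deleted': True, 'deleted_at': timeutils.utcnow()}
#
# whereas task_delete()/failed_task_delete() issue a hard DELETE by id.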
def failed_task_delete_by_task_id(context, task_id):
    """Delete all the failed tasks of a given task id"""
    _failed_tasks_get_query(context).filter_by(task_id=task_id).delete()


def failed_task_delete_by_storage(context, storage_id):
    """Delete all the failed tasks of a storage device"""
    delete_info = {'deleted': True, 'deleted_at': timeutils.utcnow()}
    _failed_tasks_get_query(context).filter_by(
        storage_id=storage_id).update(delete_info)


def failed_task_delete(context, failed_task_id):
    """Delete a given failed task"""
    _failed_tasks_get_query(context).filter_by(id=failed_task_id).delete()


def failed_task_get_all(context, marker=None, limit=None, sort_keys=None,
                        sort_dirs=None, filters=None, offset=None):
    """Retrieves all failed tasks."""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session,
                                         models.FailedTask, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset)
        # No failed task would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.FailedTask)
def _process_failed_tasks_info_filters(query, filters):
    """Common filter processing for failed task queries."""
    if filters:
        if not is_valid_model_filters(models.FailedTask, filters):
            return
        query = query.filter_by(**filters)
    return query


def _storage_host_initiators_get_query(context, session=None):
    return model_query(context, models.StorageHostInitiator,
                       session=session)


def _storage_host_initiators_get(context, storage_host_initiator_id,
                                 session=None):
    result = (_storage_host_initiators_get_query(context, session=session)
              .filter_by(id=storage_host_initiator_id)
              .first())
    if not result:
        raise exception.StorageHostInitiatorNotFound(
            storage_host_initiator_id)
    return result


def storage_host_initiators_create(context, storage_host_initiators):
    """Create multiple storage host initiators."""
    session = get_session()
    initiator_refs = []
    with session.begin():
        for initiator in storage_host_initiators:
            LOG.debug('Adding new storage host initiator for '
                      'native_storage_host_initiator_id {0}:'.format(
                          initiator.get(
                              'native_storage_host_initiator_id')))
            if not initiator.get('id'):
                initiator['id'] = uuidutils.generate_uuid()
            initiator_ref = models.StorageHostInitiator()
            initiator_ref.update(initiator)
            initiator_refs.append(initiator_ref)
        session.add_all(initiator_refs)
    return initiator_refs


def storage_host_initiators_delete(context,
                                   storage_host_initiators_id_list):
    """Delete multiple storage host initiators."""
    session = get_session()
    with session.begin():
        for initiator_id in storage_host_initiators_id_list:
            LOG.debug('Deleting storage host initiator {0}:'
                      .format(initiator_id))
            query = _storage_host_initiators_get_query(context, session)
            result = query.filter_by(id=initiator_id).delete()
            if not result:
                LOG.error(
                    exception.StorageHostInitiatorNotFound(initiator_id))
    return


def storage_host_initiators_update(context, storage_host_initiators):
    """Update multiple storage host initiators."""
    session = get_session()
    with session.begin():
        for initiator in storage_host_initiators:
            LOG.debug('Updating storage host initiator {0}:'
                      .format(initiator.get('id')))
            query = _storage_host_initiators_get_query(context, session)
            result = query.filter_by(
                id=initiator.get('id')).update(initiator)
            if not result:
                LOG.error(exception.StorageHostInitiatorNotFound(
                    initiator.get('id')))


def storage_host_initiators_get(context, storage_host_initiator_id):
    """Get a storage host initiator or raise an exception if it does not
    exist.
    """
    return _storage_host_initiators_get(context, storage_host_initiator_id)


def storage_host_initiators_get_all(context, marker=None, limit=None,
                                    sort_keys=None, sort_dirs=None,
                                    filters=None, offset=None):
    """Retrieves all storage host initiators"""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session,
                                         models.StorageHostInitiator,
                                         marker, limit, sort_keys,
                                         sort_dirs, filters, offset)
        # No storage host initiator would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.StorageHostInitiator)
def _process_storage_host_initiators_info_filters(query, filters):
    """Common filter processing for storage host initiators queries."""
    if filters:
        if not is_valid_model_filters(models.StorageHostInitiator,
                                      filters):
            return
        query = query.filter_by(**filters)
    return query


def storage_host_initiators_delete_by_storage(context, storage_id):
    """Delete all the storage host initiators of a device"""
    _storage_host_initiators_get_query(context)\
        .filter_by(storage_id=storage_id).delete()


def _storage_hosts_get_query(context, session=None):
    return model_query(context, models.StorageHost, session=session)


def _storage_hosts_get(context, storage_host_id, session=None):
    result = (_storage_hosts_get_query(context, session=session)
              .filter_by(id=storage_host_id)
              .first())
    if not result:
        raise exception.StorageHostNotFound(storage_host_id)
    return result


def storage_hosts_create(context, storage_hosts):
    """Create multiple storage hosts."""
    session = get_session()
    host_refs = []
    with session.begin():
        for host in storage_hosts:
            LOG.debug('Adding new storage host for native_host_id {0}:'
                      .format(host.get('native_host_id')))
            if not host.get('id'):
                host['id'] = uuidutils.generate_uuid()
            host_ref = models.StorageHost()
            host_ref.update(host)
            host_refs.append(host_ref)
        session.add_all(host_refs)
    return host_refs


def storage_hosts_delete(context, storage_hosts_id_list):
    """Delete multiple storage hosts."""
    session = get_session()
    with session.begin():
        for host_id in storage_hosts_id_list:
            LOG.debug('Deleting storage host {0}:'.format(host_id))
            query = _storage_hosts_get_query(context, session)
            result = query.filter_by(id=host_id).delete()
            if not result:
                LOG.error(exception.StorageHostNotFound(host_id))
    return


def storage_hosts_update(context, storage_hosts):
    """Update multiple storage hosts."""
    session = get_session()
    with session.begin():
        for host in storage_hosts:
            LOG.debug('Updating storage hosts {0}:'.format(host.get('id')))
            query = _storage_hosts_get_query(context, session)
            result = query.filter_by(id=host.get('id')).update(host)
            if not result:
                LOG.error(exception.StorageHostNotFound(host.get('id')))


def storage_hosts_get(context, storage_host_id):
    """Get a storage host or raise an exception if it does not exist."""
    return _storage_hosts_get(context, storage_host_id)


def storage_hosts_get_all(context, marker=None, limit=None, sort_keys=None,
                          sort_dirs=None, filters=None, offset=None):
    """Retrieves all storage hosts"""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session,
                                         models.StorageHost, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset)
        # No storage host would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.StorageHost)
def _process_storage_hosts_info_filters(query, filters):
    """Common filter processing for storage hosts queries."""
    if filters:
        if not is_valid_model_filters(models.StorageHost, filters):
            return
        query = query.filter_by(**filters)
    return query
def storage_hosts_delete_by_storage(context, storage_id):
    """Delete all the storage hosts of a device"""
    _storage_hosts_get_query(context).filter_by(storage_id=storage_id) \
        .delete()


def _storage_host_groups_get_query(context, session=None):
    return model_query(context, models.StorageHostGroup, session=session)


def _storage_host_groups_get(context, storage_host_grp_id, session=None):
    result = (_storage_host_groups_get_query(context, session=session)
              .filter_by(id=storage_host_grp_id)
              .first())
    if not result:
        raise exception.StorageHostGroupNotFound(storage_host_grp_id)
    return result


def storage_host_groups_create(context, storage_host_groups):
    """Create multiple storage host groups."""
    session = get_session()
    host_groups_refs = []
    with session.begin():
        for host_group in storage_host_groups:
            LOG.debug('Adding new storage host group for '
                      'native_storage_host_group_id {0}:'.format(
                          host_group.get('native_storage_host_group_id')))
            if not host_group.get('id'):
                host_group['id'] = uuidutils.generate_uuid()
            host_group_ref = models.StorageHostGroup()
            host_group_ref.update(host_group)
            host_groups_refs.append(host_group_ref)
        session.add_all(host_groups_refs)
    return host_groups_refs


def storage_host_groups_delete(context, storage_host_groups_id_list):
    """Delete multiple storage host groups."""
    session = get_session()
    with session.begin():
        for host_group_id in storage_host_groups_id_list:
            LOG.debug('Deleting storage host group {0}:'
                      .format(host_group_id))
            query = _storage_host_groups_get_query(context, session)
            result = query.filter_by(id=host_group_id).delete()
            if not result:
                LOG.error(
                    exception.StorageHostGroupNotFound(host_group_id))
    return


def storage_host_groups_update(context, storage_host_groups):
    """Update multiple storage host groups."""
    session = get_session()
    with session.begin():
        for host_group in storage_host_groups:
            LOG.debug('Updating storage host groups {0}:'
                      .format(host_group.get('id')))
            query = _storage_host_groups_get_query(context, session)
            result = query.filter_by(
                id=host_group.get('id')).update(host_group)
            if not result:
                LOG.error(exception.StorageHostGroupNotFound(
                    host_group.get('id')))


def storage_host_groups_get(context, storage_host_group_id):
    """Get a storage host group or raise an exception if it does not
    exist.
    """
    return _storage_host_groups_get(context, storage_host_group_id)


def storage_host_groups_get_all(context, marker=None, limit=None,
                                sort_keys=None, sort_dirs=None,
                                filters=None, offset=None):
    """Retrieves all storage host groups"""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session,
                                         models.StorageHostGroup, marker,
                                         limit, sort_keys, sort_dirs,
                                         filters, offset)
        # No storage host group would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.StorageHostGroup)
def _process_storage_host_groups_info_filters(query, filters):
    """Common filter processing for storage host groups queries."""
    if filters:
        if not is_valid_model_filters(models.StorageHostGroup, filters):
            return
        query = query.filter_by(**filters)
    return query


def storage_host_groups_delete_by_storage(context, storage_id):
    """Delete all the storage host groups of a device"""
    _storage_host_groups_get_query(context).filter_by(
        storage_id=storage_id).delete()


def _port_groups_get_query(context, session=None):
    return model_query(context, models.PortGroup, session=session)


def _port_groups_get(context, port_grp_id, session=None):
    result = (_port_groups_get_query(context, session=session)
              .filter_by(id=port_grp_id)
              .first())
    if not result:
        raise exception.PortGroupNotFound(port_grp_id)
    return result


def port_groups_create(context, port_groups):
    """Create multiple port groups."""
    session = get_session()
    port_groups_refs = []
    with session.begin():
        for port_group in port_groups:
            LOG.debug('Adding new port group for native_port_group_id {0}:'
                      .format(port_group.get('native_port_group_id')))
            if not port_group.get('id'):
                port_group['id'] = uuidutils.generate_uuid()
            port_group_ref = models.PortGroup()
            port_group_ref.update(port_group)
            port_groups_refs.append(port_group_ref)
        session.add_all(port_groups_refs)
    return port_groups_refs


def port_groups_delete(context, port_groups_id_list):
    """Delete multiple port groups."""
    session = get_session()
    with session.begin():
        for port_group_id in port_groups_id_list:
            LOG.debug('Deleting port group {0}:'.format(port_group_id))
            query = _port_groups_get_query(context, session)
            result = query.filter_by(id=port_group_id).delete()
            if not result:
                LOG.error(exception.PortGroupNotFound(port_group_id))
    return


def port_groups_update(context, port_groups):
    """Update multiple port groups."""
    session = get_session()
    with session.begin():
        for port_group in port_groups:
            LOG.debug('Updating port groups {0}:'
                      .format(port_group.get('id')))
            query = _port_groups_get_query(context, session)
            result = query.filter_by(
                id=port_group.get('id')).update(port_group)
            if not result:
                LOG.error(exception.PortGroupNotFound(
                    port_group.get('id')))


def port_groups_get(context, port_group_id):
    """Get a port group or raise an exception if it does not exist."""
    return _port_groups_get(context, port_group_id)


def port_groups_get_all(context, marker=None, limit=None, sort_keys=None,
                        sort_dirs=None, filters=None, offset=None):
    """Retrieves all port groups"""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session,
                                         models.PortGroup, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset)
        # No port group would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.PortGroup)
def _process_port_groups_info_filters(query, filters):
    """Common filter processing for port groups queries."""
    if filters:
        if not is_valid_model_filters(models.PortGroup, filters):
            return
        query = query.filter_by(**filters)
    return query


def port_groups_delete_by_storage(context, storage_id):
    """Delete all the port groups of a device"""
    _port_groups_get_query(context).filter_by(storage_id=storage_id).delete()


def _volume_groups_get_query(context, session=None):
    return model_query(context, models.VolumeGroup, session=session)


def _volume_groups_get(context, volume_grp_id, session=None):
    result = (_volume_groups_get_query(context, session=session)
              .filter_by(id=volume_grp_id)
              .first())
    if not result:
        raise exception.VolumeGroupNotFound(volume_grp_id)
    return result


def volume_groups_create(context, volume_groups):
    """Create multiple volume groups."""
    session = get_session()
    volume_groups_refs = []
    with session.begin():
        for volume_group in volume_groups:
            LOG.debug('Adding new volume group for '
                      'native_volume_group_id {0}:'.format(
                          volume_group.get('native_volume_group_id')))
            if not volume_group.get('id'):
                volume_group['id'] = uuidutils.generate_uuid()
            volume_group_ref = models.VolumeGroup()
            volume_group_ref.update(volume_group)
            volume_groups_refs.append(volume_group_ref)
        session.add_all(volume_groups_refs)
    return volume_groups_refs


def volume_groups_delete(context, volume_groups_id_list):
    """Delete multiple volume groups."""
    session = get_session()
    with session.begin():
        for volume_group_id in volume_groups_id_list:
            LOG.debug('Deleting volume group {0}:'.format(volume_group_id))
            query = _volume_groups_get_query(context, session)
            result = query.filter_by(id=volume_group_id).delete()
            if not result:
                LOG.error(exception.VolumeGroupNotFound(volume_group_id))
    return


def volume_groups_update(context, volume_groups):
    """Update multiple volume groups."""
    session = get_session()
    with session.begin():
        for volume_group in volume_groups:
            LOG.debug('Updating volume groups {0}:'
                      .format(volume_group.get('id')))
            query = _volume_groups_get_query(context, session)
            result = query.filter_by(
                id=volume_group.get('id')).update(volume_group)
            if not result:
                LOG.error(exception.VolumeGroupNotFound(
                    volume_group.get('id')))


def volume_groups_get(context, volume_group_id):
    """Get a volume group or raise an exception if it does not exist."""
    return _volume_groups_get(context, volume_group_id)


def volume_groups_get_all(context, marker=None, limit=None, sort_keys=None,
                          sort_dirs=None, filters=None, offset=None):
    """Retrieves all volume groups"""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session,
                                         models.VolumeGroup, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset)
        # No volume group would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.VolumeGroup)
def _process_volume_groups_info_filters(query, filters):
    """Common filter processing for volume groups queries."""
    if filters:
        if not is_valid_model_filters(models.VolumeGroup, filters):
            return
        query = query.filter_by(**filters)
    return query


def volume_groups_delete_by_storage(context, storage_id):
    """Delete all the volume groups of a device"""
    _volume_groups_get_query(context).filter_by(storage_id=storage_id) \
        .delete()


def _masking_views_get_query(context, session=None):
    return model_query(context, models.MaskingView, session=session)


def _masking_views_get(context, masking_view_id, session=None):
    result = (_masking_views_get_query(context, session=session)
              .filter_by(id=masking_view_id)
              .first())
    if not result:
        raise exception.MaskingViewNotFound(masking_view_id)
    return result


def masking_views_create(context, masking_views):
    """Create multiple masking views."""
    session = get_session()
    masking_views_refs = []
    with session.begin():
        for masking_view in masking_views:
            LOG.debug('Adding new masking view for '
                      'native_masking_view_id {0}:'.format(
                          masking_view.get('native_masking_view_id')))
            if not masking_view.get('id'):
                masking_view['id'] = uuidutils.generate_uuid()
            masking_view_ref = models.MaskingView()
            masking_view_ref.update(masking_view)
            masking_views_refs.append(masking_view_ref)
        session.add_all(masking_views_refs)
    return masking_views_refs


def masking_views_delete(context, masking_views_id_list):
    """Delete multiple masking views."""
    session = get_session()
    with session.begin():
        for masking_view_id in masking_views_id_list:
            LOG.debug('Deleting masking view {0}:'.format(masking_view_id))
            query = _masking_views_get_query(context, session)
            result = query.filter_by(id=masking_view_id).delete()
            if not result:
                LOG.error(exception.MaskingViewNotFound(masking_view_id))
    return


def masking_views_update(context, masking_views):
    """Update multiple masking views."""
    session = get_session()
    with session.begin():
        for masking_view in masking_views:
            LOG.debug('Updating masking views {0}:'
                      .format(masking_view.get('id')))
            query = _masking_views_get_query(context, session)
            result = query.filter_by(
                id=masking_view.get('id')).update(masking_view)
            if not result:
                LOG.error(exception.MaskingViewNotFound(
                    masking_view.get('id')))


def masking_views_get(context, masking_view_id):
    """Get a masking view or raise an exception if it does not exist."""
    return _masking_views_get(context, masking_view_id)


def masking_views_get_all(context, marker=None, limit=None, sort_keys=None,
                          sort_dirs=None, filters=None, offset=None):
    """Retrieves all masking views"""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session,
                                         models.MaskingView, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset)
        # No masking view would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.MaskingView)
def _process_masking_views_info_filters(query, filters):
    """Common filter processing for masking views queries."""
    if filters:
        if not is_valid_model_filters(models.MaskingView, filters):
            return
        query = query.filter_by(**filters)
    return query


def masking_views_delete_by_storage(context, storage_id):
    """Delete all the masking views of a device"""
    _masking_views_get_query(context).filter_by(storage_id=storage_id)\
        .delete()


def _storage_host_grp_host_rels_get_query(context, session=None):
    return model_query(context, models.StorageHostGrpHostRel,
                       session=session)


def _storage_host_grp_host_rels_get(context, host_grp_host_relation_id,
                                    session=None):
    result = (
        _storage_host_grp_host_rels_get_query(context, session=session)
        .filter_by(id=host_grp_host_relation_id).first())
    if not result:
        raise exception.StorageHostGrpHostRelNotFound(
            host_grp_host_relation_id)
    return result


def storage_host_grp_host_rels_create(context, host_grp_host_relations):
    """Create multiple storage host grp host relations."""
    session = get_session()
    host_grp_host_relation_refs = []
    with session.begin():
        for host_grp_host_relation in host_grp_host_relations:
            LOG.debug('Adding new storage host group host relation for '
                      'native storage host group id {0}:'.format(
                          host_grp_host_relation.get(
                              'native_storage_host_group_id')))
            if not host_grp_host_relation.get('id'):
                host_grp_host_relation['id'] = uuidutils.generate_uuid()
            host_grp_host_relation_ref = models.StorageHostGrpHostRel()
            host_grp_host_relation_ref.update(host_grp_host_relation)
            host_grp_host_relation_refs.append(host_grp_host_relation_ref)
        session.add_all(host_grp_host_relation_refs)
    return host_grp_host_relation_refs


def storage_host_grp_host_rels_delete(context,
                                      host_grp_host_relations_list):
    """Delete multiple storage host grp host relations."""
    session = get_session()
    with session.begin():
        for host_grp_host_relation_id in host_grp_host_relations_list:
            LOG.debug('deleting storage host grp host relation {0}:'
                      .format(host_grp_host_relation_id))
            query = _storage_host_grp_host_rels_get_query(context, session)
            result = query.filter_by(
                id=host_grp_host_relation_id).delete()
            if not result:
                LOG.error(exception.StorageHostGrpHostRelNotFound(
                    host_grp_host_relation_id))
    return


def storage_host_grp_host_rels_update(context,
                                      host_grp_host_relations_list):
    """Update multiple storage host grp host relations."""
    session = get_session()
    with session.begin():
        for host_grp_host_relation in host_grp_host_relations_list:
            LOG.debug('Updating storage host grp host relations {0}:'
                      .format(host_grp_host_relation.get('id')))
            query = _storage_host_grp_host_rels_get_query(context, session)
            result = query.filter_by(
                id=host_grp_host_relation.get('id')).update(
                host_grp_host_relation)
            if not result:
                LOG.error(exception.StorageHostGrpHostRelNotFound(
                    host_grp_host_relation.get('id')))


def storage_host_grp_host_rels_get(context, host_grp_host_relation_id):
    """Get a storage host grp host relation or raise an exception if it
    does not exist.
    """
    return _storage_host_grp_host_rels_get(context,
                                           host_grp_host_relation_id)


def storage_host_grp_host_rels_get_all(context, marker=None, limit=None,
                                       sort_keys=None, sort_dirs=None,
                                       filters=None, offset=None):
    """Retrieves all storage host grp host relations"""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session,
                                         models.StorageHostGrpHostRel,
                                         marker, limit, sort_keys,
                                         sort_dirs, filters, offset)
        # No storage host grp host relation would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.StorageHostGrpHostRel)
def _process_storage_host_grp_host_rels_info_filters(query, filters):
    """Common filter processing for storage host grp host relations
    queries.
    """
    if filters:
        if not is_valid_model_filters(models.StorageHostGrpHostRel,
                                      filters):
            return
        query = query.filter_by(**filters)
    return query


def storage_host_grp_host_rels_delete_by_storage(context, storage_id):
    """Delete all the storage host grp host relations of a device"""
    _storage_host_grp_host_rels_get_query(context) \
        .filter_by(storage_id=storage_id).delete()


def _port_grp_port_rels_get_query(context, session=None):
    return model_query(context, models.PortGrpPortRel, session=session)


def _port_grp_port_rels_get(context, port_grp_port_relation_id,
                            session=None):
    result = (_port_grp_port_rels_get_query(context, session=session)
              .filter_by(id=port_grp_port_relation_id).first())
    if not result:
        raise exception.PortGrpPortRelNotFound(port_grp_port_relation_id)
    return result


def port_grp_port_rels_create(context, port_grp_port_rels):
    """Create multiple port grp port relations."""
    session = get_session()
    port_grp_port_relation_refs = []
    with session.begin():
        for port_grp_port_relation in port_grp_port_rels:
            LOG.debug('adding new port group port relation for '
                      'native port group id {0}:'.format(
                          port_grp_port_relation.get(
                              'native_port_group_id')))
            if not port_grp_port_relation.get('id'):
                port_grp_port_relation['id'] = uuidutils.generate_uuid()
            port_grp_port_relation_ref = models.PortGrpPortRel()
            port_grp_port_relation_ref.update(port_grp_port_relation)
            port_grp_port_relation_refs.append(port_grp_port_relation_ref)
        session.add_all(port_grp_port_relation_refs)
    return port_grp_port_relation_refs


def port_grp_port_rels_delete(context, port_grp_port_rels_list):
    """Delete multiple port grp port relations."""
    session = get_session()
    with session.begin():
        for port_grp_port_relation_id in port_grp_port_rels_list:
            LOG.debug('deleting port grp port relation {0}:'
                      .format(port_grp_port_relation_id))
            query = _port_grp_port_rels_get_query(context, session)
            result = query.filter_by(
                id=port_grp_port_relation_id).delete()
            if not result:
                LOG.error(exception.PortGrpPortRelNotFound(
                    port_grp_port_relation_id))
    return


def port_grp_port_rels_update(context, port_grp_port_rels_list):
    """Update multiple port grp port relations."""
    session = get_session()
    with session.begin():
        for port_grp_port_relation in port_grp_port_rels_list:
            LOG.debug('Updating port grp port relations {0}:'
                      .format(port_grp_port_relation.get('id')))
            query = _port_grp_port_rels_get_query(context, session)
            result = query.filter_by(
                id=port_grp_port_relation.get('id')).update(
                port_grp_port_relation)
            if not result:
                LOG.error(exception.PortGrpPortRelNotFound(
                    port_grp_port_relation.get('id')))


def port_grp_port_rels_get(context, port_grp_port_relation_id):
    """Get a port grp port relation or raise an exception if it does not
    exist.
    """
    return _port_grp_port_rels_get(context, port_grp_port_relation_id)


def port_grp_port_rels_get_all(context, marker=None, limit=None,
                               sort_keys=None, sort_dirs=None,
                               filters=None, offset=None):
    """Retrieves all port grp port relations"""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session,
                                         models.PortGrpPortRel, marker,
                                         limit, sort_keys, sort_dirs,
                                         filters, offset)
        # No port grp port relation would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.PortGrpPortRel)
def _process_port_grp_port_rels_info_filters(query, filters):
    """Common filter processing for port grp port relations queries."""
    if filters:
        if not is_valid_model_filters(models.PortGrpPortRel, filters):
            return
        query = query.filter_by(**filters)
    return query


def port_grp_port_rels_delete_by_storage(context, storage_id):
    """Delete all the port grp port relations of a device"""
    _port_grp_port_rels_get_query(context) \
        .filter_by(storage_id=storage_id).delete()


def _vol_grp_vol_rels_get_query(context, session=None):
    return model_query(context, models.VolGrpVolRel, session=session)


def _vol_grp_vol_rels_get(context, volume_grp_volume_relation_id,
                          session=None):
    result = (_vol_grp_vol_rels_get_query(context, session=session)
              .filter_by(id=volume_grp_volume_relation_id).first())
    if not result:
        raise exception.VolGrpVolRelNotFound(volume_grp_volume_relation_id)
    return result


def vol_grp_vol_rels_create(context, vol_grp_vol_rels):
    """Create multiple volume grp volume relations."""
    session = get_session()
    volume_grp_volume_relation_refs = []
    with session.begin():
        for volume_grp_volume_relation in vol_grp_vol_rels:
            LOG.debug('adding new volume group volume relation for '
                      'native volume group id {0}:'.format(
                          volume_grp_volume_relation.get(
                              'native_volume_group_id')))
            if not volume_grp_volume_relation.get('id'):
                volume_grp_volume_relation['id'] = \
                    uuidutils.generate_uuid()
            volume_grp_volume_relation_ref = models.VolGrpVolRel()
            volume_grp_volume_relation_ref.update(
                volume_grp_volume_relation)
            volume_grp_volume_relation_refs.append(
                volume_grp_volume_relation_ref)
        session.add_all(volume_grp_volume_relation_refs)
    return volume_grp_volume_relation_refs


def vol_grp_vol_rels_delete(context, vol_grp_vol_rels_list):
    """Delete multiple volume grp volume relations."""
    session = get_session()
    with session.begin():
        for volume_grp_volume_relation_id in vol_grp_vol_rels_list:
            LOG.debug('deleting volume grp volume relation {0}:'
                      .format(volume_grp_volume_relation_id))
            query = _vol_grp_vol_rels_get_query(context, session)
            result = query.filter_by(
                id=volume_grp_volume_relation_id).delete()
            if not result:
                LOG.error(exception.VolGrpVolRelNotFound(
                    volume_grp_volume_relation_id))
    return


def vol_grp_vol_rels_update(context, vol_grp_vol_rels_list):
    """Update multiple volume grp volume relations."""
    session = get_session()
    with session.begin():
        for volume_grp_volume_relation in vol_grp_vol_rels_list:
            LOG.debug('Updating volume grp volume relations {0}:'
                      .format(volume_grp_volume_relation.get('id')))
            query = _vol_grp_vol_rels_get_query(context, session)
            result = query.filter_by(
                id=volume_grp_volume_relation.get('id')).update(
                volume_grp_volume_relation)
            if not result:
                LOG.error(exception.VolGrpVolRelNotFound(
                    volume_grp_volume_relation.get('id')))


def vol_grp_vol_rels_get(context, volume_grp_volume_relation_id):
    """Get a volume grp volume relation or raise an exception if it does
    not exist.
    """
    return _vol_grp_vol_rels_get(context, volume_grp_volume_relation_id)


def vol_grp_vol_rels_get_all(context, marker=None, limit=None,
                             sort_keys=None, sort_dirs=None, filters=None,
                             offset=None):
    """Retrieves all volume grp volume relations"""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session,
                                         models.VolGrpVolRel, marker,
                                         limit, sort_keys, sort_dirs,
                                         filters, offset)
        # No volume grp volume relation would match, return empty list
        if query is None:
            return []
        return query.all()


@apply_like_filters(model=models.VolGrpVolRel)
def _process_vol_grp_vol_rels_info_filters(query, filters):
    """Common filter processing for volume grp volume relations queries."""
    if filters:
        if not is_valid_model_filters(models.VolGrpVolRel, filters):
            return
        query = query.filter_by(**filters)
    return query


def vol_grp_vol_rels_delete_by_storage(context, storage_id):
    """Delete all the volume grp volume relations of a device"""
    _vol_grp_vol_rels_get_query(context) \
        .filter_by(storage_id=storage_id).delete()


PAGINATION_HELPERS = {
    models.AccessInfo: (_access_info_get_query,
                        _process_access_info_filters,
                        _access_info_get),
    models.StoragePool: (_storage_pool_get_query,
                         _process_storage_pool_info_filters,
                         _storage_pool_get),
    models.Storage: (_storage_get_query, _process_storage_info_filters,
                     _storage_get),
    models.AlertSource: (_alert_source_get_query,
                         _process_alert_source_filters,
                         _alert_source_get),
    models.Volume: (_volume_get_query, _process_volume_info_filters,
                    _volume_get),
    models.Controller: (_controller_get_query,
                        _process_controller_info_filters,
                        _controller_get),
    models.Port: (_port_get_query, _process_port_info_filters, _port_get),
    models.Disk: (_disk_get_query, _process_disk_info_filters, _disk_get),
    models.Quota: (_quota_get_query, _process_quota_info_filters,
                   _quota_get),
    models.Filesystem: (_filesystem_get_query,
                        _process_filesystem_info_filters,
                        _filesystem_get),
    models.Qtree: (_qtree_get_query, _process_qtree_info_filters,
                   _qtree_get),
    models.Share: (_share_get_query, _process_share_info_filters,
                   _share_get),
    models.Task: (_task_get_query, _process_tasks_info_filters, _task_get),
    models.FailedTask: (_failed_tasks_get_query,
                        _process_failed_tasks_info_filters,
                        _failed_tasks_get),
    models.StorageHostInitiator: (
        _storage_host_initiators_get_query,
        _process_storage_host_initiators_info_filters,
        _storage_host_initiators_get),
    models.StorageHost: (_storage_hosts_get_query,
                         _process_storage_hosts_info_filters,
                         _storage_hosts_get),
    models.StorageHostGroup: (_storage_host_groups_get_query,
                              _process_storage_host_groups_info_filters,
                              _storage_host_groups_get),
    models.PortGroup: (_port_groups_get_query,
                       _process_port_groups_info_filters,
                       _port_groups_get),
    models.VolumeGroup: (_volume_groups_get_query,
                         _process_volume_groups_info_filters,
                         _volume_groups_get),
    models.MaskingView: (_masking_views_get_query,
                         _process_masking_views_info_filters,
                         _masking_views_get),
    models.StorageHostGrpHostRel: (
        _storage_host_grp_host_rels_get_query,
        _process_storage_host_grp_host_rels_info_filters,
        _storage_host_grp_host_rels_get),
    models.PortGrpPortRel: (_port_grp_port_rels_get_query,
                            _process_port_grp_port_rels_info_filters,
                            _port_grp_port_rels_get),
    models.VolGrpVolRel: (
        _vol_grp_vol_rels_get_query,
        _process_vol_grp_vol_rels_info_filters,
        _vol_grp_vol_rels_get),
}
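# Illustrative note, not part of the original module: PAGINATION_HELPERS
# maps each model to a (get_query, process_filters, get) triple so that
# _generate_paginate_query() below stays generic. Wiring up a hypothetical
# new model is a one-line registration:
#
#   PAGINATION_HELPERS[models.NewModel] = (
#       _new_model_get_query, _process_new_model_filters, _new_model_get)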
def process_sort_params(sort_keys, sort_dirs, default_keys=None, default_dir='asc'): """Process the sort parameters to include default keys. Creates a list of sort keys and a list of sort directions. Adds the default keys to the end of the list if they are not already included. When adding the default keys to the sort keys list, the associated direction is: 1) The first element in the 'sort_dirs' list (if specified), else 2) 'default_dir' value (Note that 'asc' is the default value since this is the default in sqlalchemy.utils.paginate_query) :param sort_keys: List of sort keys to include in the processed list :param sort_dirs: List of sort directions to include in the processed list :param default_keys: List of sort keys that need to be included in the processed list, they are added at the end of the list if not already specified. :param default_dir: Sort direction associated with each of the default keys that are not supplied, used when they are added to the processed list :returns: list of sort keys, list of sort directions :raise exception.InvalidInput: If more sort directions than sort keys are specified or if an invalid sort direction is specified """ if default_keys is None: default_keys = ['created_at'] # Determine direction to use for when adding default keys if sort_dirs and len(sort_dirs): default_dir_value = sort_dirs[0] else: default_dir_value = default_dir # Create list of keys (do not modify the input list) if sort_keys: result_keys = list(sort_keys) else: result_keys = [] # If a list of directions is not provided, use the default sort direction # for all provided keys. if sort_dirs: result_dirs = [] # Verify sort direction for sort_dir in sort_dirs: if sort_dir not in ('asc', 'desc'): msg = _("Unknown sort direction, must be 'desc' or 'asc'.") raise exception.InvalidInput(msg) result_dirs.append(sort_dir) else: result_dirs = [default_dir_value for _sort_key in result_keys] # Ensure that the key and direction length match while len(result_dirs) < len(result_keys): result_dirs.append(default_dir_value) # Unless more directions are specified, which is an error if len(result_dirs) > len(result_keys): msg = _("Sort direction array size exceeds sort key array size.") raise exception.InvalidInput(msg) # Ensure defaults are included for key in default_keys: if key not in result_keys: result_keys.append(key) result_dirs.append(default_dir_value) return result_keys, result_dirs def _generate_paginate_query(context, session, paginate_type, marker, limit, sort_keys, sort_dirs, filters, offset=None ): """Generate the query to include the filters and the paginate options. Returns a query with sorting / pagination criteria added or None if the given filters will not yield any results. :param context: context to query under :param session: the session to use :param marker: the last item of the previous page; we return the next results after this value.
:param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :param offset: number of items to skip :param paginate_type: type of pagination to generate :returns: updated query or None """ get_query, process_filters, get = PAGINATION_HELPERS[paginate_type] sort_keys, sort_dirs = process_sort_params(sort_keys, sort_dirs, default_dir='desc') query = get_query(context, session=session) if filters: query = process_filters(query, filters) if query is None: return None marker_object = None if marker is not None: marker_object = get(context, marker, session) return sqlalchemyutils.paginate_query(query, paginate_type, limit, sort_keys, marker=marker_object, sort_dirs=sort_dirs, offset=offset) ================================================ FILE: delfin/db/sqlalchemy/models.py ================================================ # Copyright 2020 The SODA Authors. # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for Delfin data. 
""" from oslo_config import cfg from oslo_db.sqlalchemy import models from oslo_db.sqlalchemy.types import JsonEncodedDict from sqlalchemy import Column, Integer, String, Boolean, BigInteger, \ DateTime, BIGINT from sqlalchemy.ext.declarative import declarative_base from delfin.common import constants CONF = cfg.CONF BASE = declarative_base() class DelfinBase(models.ModelBase, models.TimestampMixin): """Base class for Delfin Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} metadata = None def to_dict(self): model_dict = {} for k, v in self.items(): if not issubclass(type(v), DelfinBase): model_dict[k] = v return model_dict class AccessInfo(BASE, DelfinBase): """Represent access info required for storage accessing.""" __tablename__ = "access_info" storage_id = Column(String(36), primary_key=True) storage_name = Column(String(255)) vendor = Column(String(255)) model = Column(String(255)) rest = Column(JsonEncodedDict) ssh = Column(JsonEncodedDict) cli = Column(JsonEncodedDict) smis = Column(JsonEncodedDict) extra_attributes = Column(JsonEncodedDict) class Storage(BASE, DelfinBase): """Represents a storage object.""" __tablename__ = 'storages' id = Column(String(36), primary_key=True) name = Column(String(255)) description = Column(String(255)) location = Column(String(255)) status = Column(String(255)) sync_status = Column(Integer, default=constants.SyncStatus.SYNCED) vendor = Column(String(255)) model = Column(String(255)) serial_number = Column(String(255)) firmware_version = Column(String(255)) total_capacity = Column(BigInteger) used_capacity = Column(BigInteger) free_capacity = Column(BigInteger) raw_capacity = Column(BigInteger) subscribed_capacity = Column(BigInteger) deleted_at = Column(DateTime) deleted = Column(Boolean, default=False) class Volume(BASE, DelfinBase): """Represents a volume object.""" __tablename__ = 'volumes' id = Column(String(36), primary_key=True) native_volume_id = Column(String(255)) name = Column(String(255)) description = Column(String(255)) type = Column(String(255)) status = Column(String(255)) storage_id = Column(String(36)) native_storage_pool_id = Column(String(255)) wwn = Column(String(255)) total_capacity = Column(BigInteger) used_capacity = Column(BigInteger) free_capacity = Column(BigInteger) compressed = Column(Boolean) deduplicated = Column(Boolean) class StoragePool(BASE, DelfinBase): """Represents a storage_pool object.""" __tablename__ = 'storage_pools' id = Column(String(36), primary_key=True) native_storage_pool_id = Column(String(255)) name = Column(String(255)) description = Column(String(255)) storage_type = Column(String(255)) status = Column(String(255)) storage_id = Column(String(36)) total_capacity = Column(BigInteger) used_capacity = Column(BigInteger) free_capacity = Column(BigInteger) subscribed_capacity = Column(BigInteger) class Disk(BASE, DelfinBase): """Represents a disk object.""" __tablename__ = 'disks' id = Column(String(36), primary_key=True) native_disk_id = Column(String(255)) name = Column(String(255)) physical_type = Column(String(255)) logical_type = Column(String(255)) status = Column(String(255)) location = Column(String(255)) storage_id = Column(String(255)) native_disk_group_id = Column(String(255)) serial_number = Column(String(255)) manufacturer = Column(String(255)) model = Column(String(255)) firmware = Column(String(255)) speed = Column(Integer) capacity = Column(BigInteger) health_score = Column(Integer) class Controller(BASE, DelfinBase): """Represents a controller object.""" __tablename__ = 
'controllers' id = Column(String(36), primary_key=True) native_controller_id = Column(String(255)) name = Column(String(255)) status = Column(String(255)) location = Column(String(255)) soft_version = Column(String(255)) cpu_info = Column(String(255)) cpu_count = Column(Integer) memory_size = Column(BigInteger) storage_id = Column(String(36)) mgmt_ip = Column(String(255)) class Port(BASE, DelfinBase): """Represents a port object.""" __tablename__ = 'ports' id = Column(String(36), primary_key=True) native_port_id = Column(String(255)) name = Column(String(255)) location = Column(String(255)) type = Column(String(255)) logical_type = Column(String(255)) connection_status = Column(String(255)) health_status = Column(String(255)) storage_id = Column(String(36)) native_parent_id = Column(String(255)) speed = Column(Integer) max_speed = Column(Integer) wwn = Column(String(255)) mac_address = Column(String(255)) ipv4 = Column(String(255)) ipv4_mask = Column(String(255)) ipv6 = Column(String(255)) ipv6_mask = Column(String(255)) class Filesystem(BASE, DelfinBase): """Represents a filesystem object.""" __tablename__ = 'filesystems' id = Column(String(36), primary_key=True) native_filesystem_id = Column(String(255)) name = Column(String(255)) type = Column(String(255)) status = Column(String(255)) storage_id = Column(String(36)) native_pool_id = Column(String(255)) security_mode = Column(String(255)) total_capacity = Column(BigInteger) used_capacity = Column(BigInteger) free_capacity = Column(BigInteger) compressed = Column(Boolean) deduplicated = Column(Boolean) worm = Column(String(255)) class Qtree(BASE, DelfinBase): """Represents a qtree object.""" __tablename__ = 'qtrees' id = Column(String(36), primary_key=True) native_qtree_id = Column(String(255)) name = Column(String(255)) path = Column(String(255)) storage_id = Column(String(36)) native_filesystem_id = Column(String(255)) security_mode = Column(String(255)) class Quota(BASE, DelfinBase): """Represents a quota object.""" __tablename__ = 'quota' id = Column(String(36), primary_key=True) native_quota_id = Column(String(255)) type = Column(String(255)) storage_id = Column(String(36)) native_filesystem_id = Column(String(255)) native_qtree_id = Column(String(255)) capacity_hard_limit = Column(BigInteger) capacity_soft_limit = Column(BigInteger) file_hard_limit = Column(BigInteger) file_soft_limit = Column(BigInteger) file_count = Column(BigInteger) used_capacity = Column(BigInteger) user_group_name = Column(String(255)) class Share(BASE, DelfinBase): """Represents a share object.""" __tablename__ = 'shares' id = Column(String(36), primary_key=True) native_share_id = Column(String(255)) name = Column(String(255)) path = Column(String(255)) storage_id = Column(String(36)) native_filesystem_id = Column(String(255)) native_qtree_id = Column(String(255)) protocol = Column(String(255)) class AlertSource(BASE, DelfinBase): """Represents an alert source configuration.""" __tablename__ = 'alert_source' storage_id = Column(String(36), primary_key=True) host = Column(String(255)) version = Column(String(255)) community_string = Column(String(255)) username = Column(String(255)) security_level = Column(String(255)) auth_key = Column(String(255)) auth_protocol = Column(String(255)) privacy_protocol = Column(String(255)) privacy_key = Column(String(255)) engine_id = Column(String(255)) port = Column(Integer) context_name = Column(String(255)) retry_num = Column(Integer) expiration = Column(Integer) class Task(BASE, DelfinBase): """Represents task
attributes.""" __tablename__ = 'tasks' id = Column(Integer, primary_key=True, autoincrement=True) storage_id = Column(String(36)) interval = Column(Integer) method = Column(String(255)) args = Column(JsonEncodedDict) last_run_time = Column(Integer) job_id = Column(String(36)) executor = Column(String(255)) deleted_at = Column(DateTime) deleted = Column(Boolean, default=False) class FailedTask(BASE, DelfinBase): """Represents a failed task attributes.""" __tablename__ = 'failed_tasks' id = Column(Integer, primary_key=True, autoincrement=True) storage_id = Column(String(36)) task_id = Column(Integer) interval = Column(Integer) start_time = Column(BIGINT) end_time = Column(BIGINT) retry_count = Column(Integer) method = Column(String(255)) result = Column(String(255)) job_id = Column(String(36)) executor = Column(String(255)) deleted_at = Column(DateTime) deleted = Column(Boolean, default=False) class StorageHostInitiator(BASE, DelfinBase): """Represents the storage host initiator attributes.""" __tablename__ = 'storage_host_initiators' id = Column(String(36), primary_key=True) storage_id = Column(String(36)) name = Column(String(255)) description = Column(String(255)) alias = Column(String(255)) wwn = Column(String(255)) status = Column(String(255)) type = Column(String(255)) native_storage_host_id = Column(String(255)) native_storage_host_initiator_id = Column(String(255)) class StorageHost(BASE, DelfinBase): """Represents the storage host attributes.""" __tablename__ = 'storage_hosts' id = Column(String(36), primary_key=True) storage_id = Column(String(36)) name = Column(String(255)) description = Column(String(255)) os_type = Column(String(255)) ip_address = Column(String(255)) status = Column(String(255)) native_storage_host_id = Column(String(255)) class StorageHostGroup(BASE, DelfinBase): """Represents the storage host group attributes.""" __tablename__ = 'storage_host_groups' id = Column(String(36), primary_key=True) storage_id = Column(String(36)) name = Column(String(255)) description = Column(String(255)) native_storage_host_group_id = Column(String(255)) class PortGroup(BASE, DelfinBase): """Represents the port group attributes.""" __tablename__ = 'port_groups' id = Column(String(36), primary_key=True) storage_id = Column(String(36)) name = Column(String(255)) description = Column(String(255)) native_port_group_id = Column(String(255)) class VolumeGroup(BASE, DelfinBase): """Represents the volume group attributes.""" __tablename__ = 'volume_groups' id = Column(String(36), primary_key=True) storage_id = Column(String(36)) name = Column(String(255)) description = Column(String(255)) native_volume_group_id = Column(String(255)) class MaskingView(BASE, DelfinBase): """Represents the masking view attributes.""" __tablename__ = 'masking_views' id = Column(String(36), primary_key=True) storage_id = Column(String(36)) name = Column(String(255)) description = Column(String(255)) native_storage_host_group_id = Column(String(255)) native_volume_group_id = Column(String(255)) native_port_group_id = Column(String(255)) native_storage_host_id = Column(String(255)) native_volume_id = Column(String(255)) native_masking_view_id = Column(String(255)) class StorageHostGrpHostRel(BASE, DelfinBase): """Represents the storage host group and storage host relation attributes. 
""" __tablename__ = 'storage_host_grp_host_rels' id = Column(String(36), primary_key=True) storage_id = Column(String(36)) name = Column(String(255)) description = Column(String(255)) native_storage_host_group_id = Column(String(255)) native_storage_host_id = Column(String(255)) class PortGrpPortRel(BASE, DelfinBase): """Represents port group and port relation attributes.""" __tablename__ = 'port_grp_port_rels' id = Column(String(36), primary_key=True) storage_id = Column(String(36)) name = Column(String(255)) description = Column(String(255)) native_port_group_id = Column(String(255)) native_port_id = Column(String(255)) class VolGrpVolRel(BASE, DelfinBase): """Represents the volume group and volume relation attributes.""" __tablename__ = 'vol_grp_vol_rels' id = Column(String(36), primary_key=True) storage_id = Column(String(36)) name = Column(String(255)) description = Column(String(255)) native_volume_group_id = Column(String(255)) native_volume_id = Column(String(255)) ================================================ FILE: delfin/drivers/__init__.py ================================================ ================================================ FILE: delfin/drivers/api.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import six from oslo_log import log from oslo_utils import uuidutils from delfin import db from delfin.drivers import helper from delfin.drivers import manager LOG = log.getLogger(__name__) class API(object): def __init__(self): self.driver_manager = manager.DriverManager() def discover_storage(self, context, access_info): """Discover a storage system with access information.""" helper.encrypt_password(context, access_info) if 'storage_id' not in access_info: access_info['storage_id'] = six.text_type( uuidutils.generate_uuid()) driver = self.driver_manager.get_driver(context, cache_on_load=False, **access_info) storage = driver.get_storage(context) # Need to validate storage response from driver helper.check_storage_repetition(context, storage) access_info = db.access_info_create(context, access_info) storage['id'] = access_info['storage_id'] storage = db.storage_create(context, storage) LOG.info("Storage found successfully.") return storage def update_access_info(self, context, access_info): """Validate and update access information.""" helper.encrypt_password(context, access_info) driver = self.driver_manager.get_driver(context, cache_on_load=False, **access_info) storage_new = driver.get_storage(context) # Need to validate storage response from driver storage_id = access_info['storage_id'] helper.check_storage_consistency(context, storage_id, storage_new) access_info = db.access_info_update(context, storage_id, access_info) db.storage_update(context, storage_id, storage_new) LOG.info("Access information updated successfully.") return access_info def remove_storage(self, context, storage_id): """Clear driver instance from driver factory.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) driver.delete_storage(context) self.driver_manager.remove_driver(storage_id) def get_storage(self, context, storage_id): """Get storage device information from storage system""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.get_storage(context) def list_storage_pools(self, context, storage_id): """List all storage pools from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_storage_pools(context) def list_volumes(self, context, storage_id): """List all storage volumes from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_volumes(context) def list_controllers(self, context, storage_id): """List all storage controllers from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_controllers(context) def list_ports(self, context, storage_id): """List all ports from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_ports(context) def list_disks(self, context, storage_id): """List all disks from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_disks(context) def list_quotas(self, context, storage_id): """List all quotas from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_quotas(context) def list_filesystems(self, context, storage_id): """List all filesystems from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_filesystems(context) def list_qtrees(self, context, storage_id): """List all qtrees from storage system.""" driver = 
self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_qtrees(context) def list_shares(self, context, storage_id): """List all shares from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_shares(context) def add_trap_config(self, context, storage_id, trap_config): """Config the trap receiver in storage system.""" pass def remove_trap_config(self, context, storage_id, trap_config): """Remove trap receiver configuration from storage system.""" pass def parse_alert(self, context, storage_id, alert): """Parse alert data got from snmp trap server.""" access_info = db.access_info_get(context, storage_id) driver = self.driver_manager.get_driver(context, invoke_on_load=False, **access_info) return driver.parse_alert(context, alert) def clear_alert(self, context, storage_id, sequence_number): """Clear alert from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) driver.clear_alert(context, sequence_number) def list_alerts(self, context, storage_id, query_para=None): """List alert from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_alerts(context, query_para) def collect_perf_metrics(self, context, storage_id, resource_metrics, start_time, end_time): """Collect performance metrics""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.collect_perf_metrics(context, storage_id, resource_metrics, start_time, end_time) def get_capabilities(self, context, storage_id, filters=None): """Get capabilities from supported driver""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.get_capabilities(context, filters) def list_storage_host_initiators(self, context, storage_id): """List all storage initiators from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_storage_host_initiators(context) def list_storage_hosts(self, context, storage_id): """List all storage hosts from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_storage_hosts(context) def list_storage_host_groups(self, context, storage_id): """List all storage host groups from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_storage_host_groups(context) def list_port_groups(self, context, storage_id): """List all port groups from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_port_groups(context) def list_volume_groups(self, context, storage_id): """List all volume groups from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_volume_groups(context) def list_masking_views(self, context, storage_id): """List all masking views from storage system.""" driver = self.driver_manager.get_driver(context, storage_id=storage_id) return driver.list_masking_views(context) def get_alert_sources(self, context, storage_id): access_info = db.access_info_get(context, storage_id) driver = self.driver_manager.get_driver(context, cache_on_load=False, **access_info) return driver.get_alert_sources(context) ================================================ FILE: delfin/drivers/dell_emc/__init__.py ================================================ ================================================ FILE: 
delfin/drivers/dell_emc/power_store/__init__.py ================================================ ================================================ FILE: delfin/drivers/dell_emc/power_store/consts.py ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin.common import constants class StatusCode(object): SUCCESS = 200 SUCCESS_CREATE_RESPONSE = 201 SUCCESS_NO_CONTENT = 204 PARTIAL_CONTENT = 206 UNAUTHORIZED = 401 FORBIDDEN = 403 class DigitalConstant(object): ZERO = 0 ONE = 1 MINUS_ONE = -1 TWO = 2 THREE = 3 FIVE = 5 SIX = 6 SIXTY = 60 STORAGE_STATUS_MAP = { 'Unconfigured': constants.StorageStatus.NORMAL, 'Unconfigured_Faulted': constants.StorageStatus.ABNORMAL, 'Configuring': constants.StorageStatus.NORMAL, 'Core_Initialization': constants.StorageStatus.NORMAL, 'Configured': constants.StorageStatus.NORMAL, 'Expanding': constants.StorageStatus.NORMAL, 'Removing': constants.StorageStatus.NORMAL, 'Clustering_Failed': constants.StorageStatus.ABNORMAL, 'Core_Initialization_Failed': constants.StorageStatus.ABNORMAL, 'Removed': constants.StorageStatus.OFFLINE, 'Post_Core_Initialization': constants.StorageStatus.NORMAL, 'Unknown': constants.StorageStatus.UNKNOWN } VOLUME_STATUS_MAP = { 'Ready': constants.StorageStatus.NORMAL, 'Initializing': constants.StorageStatus.NORMAL, 'Offline': constants.StorageStatus.OFFLINE, 'Destroying': constants.StorageStatus.NORMAL } VIRTUAL_VOLUME_STATUS_MAP = { 'Ready': constants.StorageStatus.NORMAL, 'Not_Ready': constants.StorageStatus.ABNORMAL, 'Write_Disabled': constants.StorageStatus.ABNORMAL, 'Mixed': constants.StorageStatus.ABNORMAL, 'Not_Applicable': constants.StorageStatus.ABNORMAL } VOLUME_TYPE_MAP = { 'Primary': constants.VolumeType.THIN, 'Clone': constants.VolumeType.THIN } DISK_PHYSICAL_TYPE = { 'SAS_SSD': constants.DiskPhysicalType.SSD, 'NVMe_SCM': constants.DiskPhysicalType.UNKNOWN, 'NVMe_SSD': constants.DiskPhysicalType.NVME_SSD, 'Unknown': constants.DiskPhysicalType.UNKNOWN } DISK_STATUS_MAP = { 'Uninitialized': constants.DiskStatus.NORMAL, 'Healthy': constants.DiskStatus.NORMAL, 'Initializing': constants.DiskStatus.NORMAL, 'Failed': constants.DiskStatus.ABNORMAL, 'Disconnected': constants.DiskStatus.OFFLINE, 'Prepare_Failed': constants.DiskStatus.NORMAL, 'Trigger_Update': constants.DiskStatus.NORMAL } CONTROLLER_STATUS_MAP = { 'Uninitialized': constants.ControllerStatus.NORMAL, 'Healthy': constants.ControllerStatus.NORMAL, 'Initializing': constants.ControllerStatus.NORMAL, 'Failed': constants.ControllerStatus.FAULT, 'Disconnected': constants.ControllerStatus.OFFLINE, 'Prepare_Failed': constants.ControllerStatus.NORMAL, 'Trigger_Update': constants.ControllerStatus.NORMAL } PORT_CONNECTION_STATUS_MAP = { 'true': constants.PortConnectionStatus.CONNECTED, True: constants.PortConnectionStatus.CONNECTED, 'false': constants.PortConnectionStatus.DISCONNECTED, False: constants.PortConnectionStatus.DISCONNECTED } PORT_HEALTH_STATUS_MAP = { 'Uninitialized': constants.PortHealthStatus.NORMAL,
'Healthy': constants.PortHealthStatus.NORMAL, 'Initializing': constants.PortHealthStatus.NORMAL, 'Failed': constants.PortHealthStatus.ABNORMAL, 'Disconnected': constants.PortHealthStatus.NORMAL, 'Prepare_Failed': constants.PortHealthStatus.NORMAL, 'Trigger_Update': constants.PortHealthStatus.NORMAL, 'Empty': constants.PortHealthStatus.UNKNOWN } ALERT_SEVERITY_MAP = { 'Critical': constants.Severity.CRITICAL, 'Major': constants.Severity.MAJOR, 'Minor': constants.Severity.MINOR, 'Info': constants.Severity.INFORMATIONAL, 'None': constants.Severity.NOT_SPECIFIED, } HOST_OS_TYPES_MAP = { 'Windows': constants.HostOSTypes.WINDOWS, 'Linux': constants.HostOSTypes.LINUX, 'ESXi': constants.HostOSTypes.VMWARE_ESX, 'AIX': constants.HostOSTypes.AIX, 'HP-UX': constants.HostOSTypes.HP_UX, 'Solaris': constants.HostOSTypes.SOLARIS } INITIATOR_TYPE_MAP = { 'iSCSI': constants.InitiatorType.ISCSI, 'FC': constants.InitiatorType.FC, 'NVMe': constants.InitiatorType.NVME_OVER_FABRIC, 'NVMe_vVol': constants.InitiatorType.NVME_OVER_FABRIC } class DiskType(object): NVME_NVRAM = 'NVMe_NVRAM' NVME_SCM = 'NVMe_SCM' ALL = (NVME_SCM, NVME_NVRAM) # /metrics/generate SPACE_METRICS_BY_APPLIANCE = 'space_metrics_by_appliance' SPACE_METRICS_BY_VOLUME = 'space_metrics_by_volume' PERFORMANCE_METRICS_BY_CLUSTER = 'performance_metrics_by_cluster' PERFORMANCE_METRICS_BY_APPLIANCE = 'performance_metrics_by_appliance' PERFORMANCE_METRICS_BY_VOLUME = 'performance_metrics_by_volume' PERFORMANCE_METRICS_BY_NODE = 'performance_metrics_by_node' PERFORMANCE_METRICS_BY_FE_FC_PORT = 'performance_metrics_by_fe_fc_port' PERFORMANCE_METRICS_INTERVAL = 'Twenty_Sec' PERF_INTERVAL = 20 # character CHARACTER_DRIVE = 'Drive' CHARACTER_NODE = 'Node' CHARACTER_SNAPSHOT = 'Snapshot' CHARACTER_EMPTY = 'Empty' MGMT_NODE_COREOS = 'Mgmt_Node_CoreOS' LIMIT_COUNT = 2000 DEFAULT_TIMEOUT = 10 UTC_FORMAT = '%Y-%m-%dT%H:%M:%S.%f+00:00' SYSTEM_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' PERF_TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ' PARSE_ALERT_DESCRIPTION = '1.3.6.1.4.1.1139.205.1.1.2' PARSE_ALERT_CODE = '1.3.6.1.4.1.1139.205.1.1.1' PARSE_ALERT_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0' PARSE_ALERT_TIME = '1.3.6.1.2.1.1.3.0' PARSE_ALERT_TIME_UTC = '1.3.6.1.4.1.1139.205.1.1.10' PARSE_ALERT_UPDATE_TIME_UTC = '1.3.6.1.4.1.1139.205.1.1.9' PARSE_ALERT_RESOURCE_TYPE = '1.3.6.1.4.1.1139.205.1.1.4' PARSE_ALERT_RESOURCE_ID = '1.3.6.1.4.1.1139.205.1.1.5' PARSE_ALERT_RESOURCE_NAME = '1.3.6.1.4.1.1139.205.1.1.6' PARSE_ALERT_STATE = '1.3.6.1.4.1.1139.205.1.1.7' PARSE_ALERT_APPLIANCE = '1.3.6.1.4.1.1139.205.1.1.8' SNMP_SEVERITY_MAP = { '1.3.6.1.4.1.1139.205.1.2.1': constants.Severity.CRITICAL, '1.3.6.1.4.1.1139.205.1.2.2': constants.Severity.MAJOR, '1.3.6.1.4.1.1139.205.1.2.3': constants.Severity.MINOR, '1.3.6.1.4.1.1139.205.1.2.4': constants.Severity.INFORMATIONAL } STORAGE_CAP = { constants.StorageMetric.IOPS.name: { "unit": constants.StorageMetric.IOPS.unit, "description": constants.StorageMetric.IOPS.description }, constants.StorageMetric.READ_IOPS.name: { "unit": constants.StorageMetric.READ_IOPS.unit, "description": constants.StorageMetric.READ_IOPS.description }, constants.StorageMetric.WRITE_IOPS.name: { "unit": constants.StorageMetric.WRITE_IOPS.unit, "description": constants.StorageMetric.WRITE_IOPS.description }, constants.StorageMetric.THROUGHPUT.name: { "unit": constants.StorageMetric.THROUGHPUT.unit, "description": constants.StorageMetric.THROUGHPUT.description }, constants.StorageMetric.READ_THROUGHPUT.name: { "unit": constants.StorageMetric.READ_THROUGHPUT.unit, "description": 
constants.StorageMetric.READ_THROUGHPUT.description }, constants.StorageMetric.WRITE_THROUGHPUT.name: { "unit": constants.StorageMetric.WRITE_THROUGHPUT.unit, "description": constants.StorageMetric.WRITE_THROUGHPUT.description }, constants.StorageMetric.RESPONSE_TIME.name: { "unit": constants.StorageMetric.RESPONSE_TIME.unit, "description": constants.StorageMetric.RESPONSE_TIME.description }, constants.StorageMetric.READ_RESPONSE_TIME.name: { "unit": constants.StorageMetric.READ_RESPONSE_TIME.unit, "description": constants.StorageMetric.READ_RESPONSE_TIME.description }, constants.StorageMetric.WRITE_RESPONSE_TIME.name: { "unit": constants.StorageMetric.WRITE_RESPONSE_TIME.unit, "description": constants.StorageMetric.WRITE_RESPONSE_TIME.description }, constants.StorageMetric.IO_SIZE.name: { "unit": constants.StorageMetric.IO_SIZE.unit, "description": constants.StorageMetric.IO_SIZE.description }, constants.StorageMetric.READ_IO_SIZE.name: { "unit": constants.StorageMetric.READ_IO_SIZE.unit, "description": constants.StorageMetric.READ_IO_SIZE.description }, constants.StorageMetric.WRITE_IO_SIZE.name: { "unit": constants.StorageMetric.WRITE_IO_SIZE.unit, "description": constants.StorageMetric.WRITE_IO_SIZE.description } } STORAGE_POOL_CAP = { constants.StoragePoolMetric.IOPS.name: { "unit": constants.StoragePoolMetric.IOPS.unit, "description": constants.StoragePoolMetric.IOPS.description }, constants.StoragePoolMetric.READ_IOPS.name: { "unit": constants.StoragePoolMetric.READ_IOPS.unit, "description": constants.StoragePoolMetric.READ_IOPS.description }, constants.StoragePoolMetric.WRITE_IOPS.name: { "unit": constants.StoragePoolMetric.WRITE_IOPS.unit, "description": constants.StoragePoolMetric.WRITE_IOPS.description }, constants.StoragePoolMetric.THROUGHPUT.name: { "unit": constants.StoragePoolMetric.THROUGHPUT.unit, "description": constants.StoragePoolMetric.THROUGHPUT.description }, constants.StoragePoolMetric.READ_THROUGHPUT.name: { "unit": constants.StoragePoolMetric.READ_THROUGHPUT.unit, "description": constants.StoragePoolMetric.READ_THROUGHPUT.description }, constants.StoragePoolMetric.WRITE_THROUGHPUT.name: { "unit": constants.StoragePoolMetric.WRITE_THROUGHPUT.unit, "description": constants.StoragePoolMetric.WRITE_THROUGHPUT.description }, constants.StoragePoolMetric.RESPONSE_TIME.name: { "unit": constants.StoragePoolMetric.RESPONSE_TIME.unit, "description": constants.StoragePoolMetric.RESPONSE_TIME.description }, constants.StoragePoolMetric.READ_RESPONSE_TIME.name: { "unit": constants.StoragePoolMetric.READ_RESPONSE_TIME.unit, "description": constants.StoragePoolMetric.READ_RESPONSE_TIME.description }, constants.StoragePoolMetric.WRITE_RESPONSE_TIME.name: { "unit": constants.StoragePoolMetric.WRITE_RESPONSE_TIME.unit, "description": constants.StoragePoolMetric.WRITE_RESPONSE_TIME.description }, constants.StoragePoolMetric.IO_SIZE.name: { "unit": constants.StoragePoolMetric.IO_SIZE.unit, "description": constants.StoragePoolMetric.IO_SIZE.description }, constants.StoragePoolMetric.READ_IO_SIZE.name: { "unit": constants.StoragePoolMetric.READ_IO_SIZE.unit, "description": constants.StoragePoolMetric.READ_IO_SIZE.description }, constants.StoragePoolMetric.WRITE_IO_SIZE.name: { "unit": constants.StoragePoolMetric.WRITE_IO_SIZE.unit, "description": constants.StoragePoolMetric.WRITE_IO_SIZE.description } } VOLUME_CAP = { constants.VolumeMetric.IOPS.name: { "unit": constants.VolumeMetric.IOPS.unit, "description": constants.VolumeMetric.IOPS.description }, 
constants.VolumeMetric.READ_IOPS.name: { "unit": constants.VolumeMetric.READ_IOPS.unit, "description": constants.VolumeMetric.READ_IOPS.description }, constants.VolumeMetric.WRITE_IOPS.name: { "unit": constants.VolumeMetric.WRITE_IOPS.unit, "description": constants.VolumeMetric.WRITE_IOPS.description }, constants.VolumeMetric.THROUGHPUT.name: { "unit": constants.VolumeMetric.THROUGHPUT.unit, "description": constants.VolumeMetric.THROUGHPUT.description }, constants.VolumeMetric.READ_THROUGHPUT.name: { "unit": constants.VolumeMetric.READ_THROUGHPUT.unit, "description": constants.VolumeMetric.READ_THROUGHPUT.description }, constants.VolumeMetric.WRITE_THROUGHPUT.name: { "unit": constants.VolumeMetric.WRITE_THROUGHPUT.unit, "description": constants.VolumeMetric.WRITE_THROUGHPUT.description }, constants.VolumeMetric.RESPONSE_TIME.name: { "unit": constants.VolumeMetric.RESPONSE_TIME.unit, "description": constants.VolumeMetric.RESPONSE_TIME.description }, constants.VolumeMetric.READ_RESPONSE_TIME.name: { "unit": constants.VolumeMetric.READ_RESPONSE_TIME.unit, "description": constants.VolumeMetric.READ_RESPONSE_TIME.description }, constants.VolumeMetric.WRITE_RESPONSE_TIME.name: { "unit": constants.VolumeMetric.WRITE_RESPONSE_TIME.unit, "description": constants.VolumeMetric.WRITE_RESPONSE_TIME.description }, constants.VolumeMetric.IO_SIZE.name: { "unit": constants.VolumeMetric.IO_SIZE.unit, "description": constants.VolumeMetric.IO_SIZE.description }, constants.VolumeMetric.READ_IO_SIZE.name: { "unit": constants.VolumeMetric.READ_IO_SIZE.unit, "description": constants.VolumeMetric.READ_IO_SIZE.description }, constants.VolumeMetric.WRITE_IO_SIZE.name: { "unit": constants.VolumeMetric.WRITE_IO_SIZE.unit, "description": constants.VolumeMetric.WRITE_IO_SIZE.description } } CONTROLLER_CAP = { constants.ControllerMetric.IOPS.name: { "unit": constants.ControllerMetric.IOPS.unit, "description": constants.ControllerMetric.IOPS.description }, constants.ControllerMetric.READ_IOPS.name: { "unit": constants.ControllerMetric.READ_IOPS.unit, "description": constants.ControllerMetric.READ_IOPS.description }, constants.ControllerMetric.WRITE_IOPS.name: { "unit": constants.ControllerMetric.WRITE_IOPS.unit, "description": constants.ControllerMetric.WRITE_IOPS.description }, constants.ControllerMetric.THROUGHPUT.name: { "unit": constants.ControllerMetric.THROUGHPUT.unit, "description": constants.ControllerMetric.THROUGHPUT.description }, constants.ControllerMetric.READ_THROUGHPUT.name: { "unit": constants.ControllerMetric.READ_THROUGHPUT.unit, "description": constants.ControllerMetric.READ_THROUGHPUT.description }, constants.ControllerMetric.WRITE_THROUGHPUT.name: { "unit": constants.ControllerMetric.WRITE_THROUGHPUT.unit, "description": constants.ControllerMetric.WRITE_THROUGHPUT.description }, constants.ControllerMetric.RESPONSE_TIME.name: { "unit": constants.ControllerMetric.RESPONSE_TIME.unit, "description": constants.ControllerMetric.RESPONSE_TIME.description }, constants.ControllerMetric.READ_RESPONSE_TIME.name: { "unit": constants.ControllerMetric.READ_RESPONSE_TIME.unit, "description": constants.ControllerMetric.READ_RESPONSE_TIME.description }, constants.ControllerMetric.WRITE_RESPONSE_TIME.name: { "unit": constants.ControllerMetric.WRITE_RESPONSE_TIME.unit, "description": constants.ControllerMetric.WRITE_RESPONSE_TIME.description }, constants.ControllerMetric.IO_SIZE.name: { "unit": constants.ControllerMetric.IO_SIZE.unit, "description": constants.ControllerMetric.IO_SIZE.description }, 
constants.ControllerMetric.READ_IO_SIZE.name: { "unit": constants.ControllerMetric.READ_IO_SIZE.unit, "description": constants.ControllerMetric.READ_IO_SIZE.description }, constants.ControllerMetric.WRITE_IO_SIZE.name: { "unit": constants.ControllerMetric.WRITE_IO_SIZE.unit, "description": constants.ControllerMetric.WRITE_IO_SIZE.description }, constants.ControllerMetric.CPU_USAGE.name: { "unit": constants.ControllerMetric.CPU_USAGE.unit, "description": constants.ControllerMetric.CPU_USAGE.description } } PORT_CAP = { constants.PortMetric.IOPS.name: { "unit": constants.PortMetric.IOPS.unit, "description": constants.PortMetric.IOPS.description }, constants.PortMetric.READ_IOPS.name: { "unit": constants.PortMetric.READ_IOPS.unit, "description": constants.PortMetric.READ_IOPS.description }, constants.PortMetric.WRITE_IOPS.name: { "unit": constants.PortMetric.WRITE_IOPS.unit, "description": constants.PortMetric.WRITE_IOPS.description }, constants.PortMetric.THROUGHPUT.name: { "unit": constants.PortMetric.THROUGHPUT.unit, "description": constants.PortMetric.THROUGHPUT.description }, constants.PortMetric.READ_THROUGHPUT.name: { "unit": constants.PortMetric.READ_THROUGHPUT.unit, "description": constants.PortMetric.READ_THROUGHPUT.description }, constants.PortMetric.WRITE_THROUGHPUT.name: { "unit": constants.PortMetric.WRITE_THROUGHPUT.unit, "description": constants.PortMetric.WRITE_THROUGHPUT.description }, constants.PortMetric.RESPONSE_TIME.name: { "unit": constants.PortMetric.RESPONSE_TIME.unit, "description": constants.PortMetric.RESPONSE_TIME.description }, constants.PortMetric.READ_RESPONSE_TIME.name: { "unit": constants.PortMetric.READ_RESPONSE_TIME.unit, "description": constants.PortMetric.READ_RESPONSE_TIME.description }, constants.PortMetric.WRITE_RESPONSE_TIME.name: { "unit": constants.PortMetric.WRITE_RESPONSE_TIME.unit, "description": constants.PortMetric.WRITE_RESPONSE_TIME.description }, constants.PortMetric.IO_SIZE.name: { "unit": constants.PortMetric.IO_SIZE.unit, "description": constants.PortMetric.IO_SIZE.description }, constants.PortMetric.READ_IO_SIZE.name: { "unit": constants.PortMetric.READ_IO_SIZE.unit, "description": constants.PortMetric.READ_IO_SIZE.description }, constants.PortMetric.WRITE_IO_SIZE.name: { "unit": constants.PortMetric.WRITE_IO_SIZE.unit, "description": constants.PortMetric.WRITE_IO_SIZE.description } } ================================================ FILE: delfin/drivers/dell_emc/power_store/power_store.py ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
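consts.py above applies one idiom throughout: each vendor-reported string is mapped onto a delfin constant through a plain dict, and callers look values up with .get() plus an explicit fallback, so unrecognized backend states degrade gracefully instead of raising. A two-line sketch of the lookup as the driver performs it:

from delfin.common import constants
from delfin.drivers.dell_emc.power_store import consts

# An unmapped cluster state falls back to UNKNOWN rather than KeyError.
status = consts.STORAGE_STATUS_MAP.get('Configured',
                                       constants.StorageStatus.UNKNOWN)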
from delfin.common import constants from delfin.drivers import driver from oslo_log import log from delfin.drivers.dell_emc.power_store import rest_handler, consts LOG = log.getLogger(__name__) class PowerStoreDriver(driver.StorageDriver): def __init__(self, **kwargs): super().__init__(**kwargs) self.rest_handler = rest_handler.RestHandler(**kwargs) self.rest_handler.login() def get_storage(self, context): return self.rest_handler.get_storage(self.storage_id) def list_storage_pools(self, context): return self.rest_handler.get_storage_pools(self.storage_id) def list_volumes(self, context): return self.rest_handler.get_volumes(self.storage_id) def list_alerts(self, context, query_para=None): return self.rest_handler.list_alerts(query_para) def clear_alert(self, context, alert): """ PowerStore doesn't support clear alerts through API. :param context: :param alert: :return: """ pass @staticmethod def parse_alert(context, alert): return rest_handler.RestHandler.get_parse_alerts(alert) def get_alert_sources(self, context): return self.rest_handler.get_alert_sources(self.storage_id) def list_controllers(self, context): return self.rest_handler.get_controllers(self.storage_id) def list_disks(self, context): return self.rest_handler.get_disks(self.storage_id) def list_ports(self, context): hardware_d = self.rest_handler.get_port_hardware() appliance_name_dict = self.rest_handler.get_appliance_name() ports = self.rest_handler.get_fc_ports( self.storage_id, hardware_d, appliance_name_dict) ports.extend( self.rest_handler.get_eth_ports( self.storage_id, hardware_d, appliance_name_dict)) ports.extend( self.rest_handler.get_sas_ports( self.storage_id, hardware_d, appliance_name_dict)) return ports def reset_connection(self, context, **kwargs): self.rest_handler.logout() self.rest_handler.login() def add_trap_config(self, context, trap_config): pass def remove_trap_config(self, context, trap_config): pass @staticmethod def get_access_url(): return 'https://{ip}:{port}' def collect_perf_metrics(self, context, storage_id, resource_metrics, start_time, end_time): LOG.info('The system(storage_id: %s) starts to collect powerstore ' 'performance, start_time: %s, end_time: %s', storage_id, start_time, end_time) metrics = [] if resource_metrics.get(constants.ResourceType.STORAGE): storage_metrics = self.rest_handler.get_storage_metrics( storage_id, resource_metrics.get(constants.ResourceType.STORAGE), start_time, end_time) metrics.extend(storage_metrics) LOG.info('The system(storage_id: %s) stop to collect storage' ' performance, The length is: %s', storage_id, len(storage_metrics)) if resource_metrics.get(constants.ResourceType.STORAGE_POOL): pool_metrics = self.rest_handler.get_pool_metrics( storage_id, resource_metrics.get(constants.ResourceType.STORAGE_POOL), start_time, end_time) metrics.extend(pool_metrics) LOG.info('The system(storage_id: %s) stop to collect pool' ' performance, The length is: %s', storage_id, len(pool_metrics)) if resource_metrics.get(constants.ResourceType.VOLUME): volume_metrics = self.rest_handler.get_volume_metrics( storage_id, resource_metrics.get(constants.ResourceType.VOLUME), start_time, end_time) metrics.extend(volume_metrics) LOG.info('The system(storage_id: %s) stop to collect volume' ' performance, The length is: %s', storage_id, len(volume_metrics)) if resource_metrics.get(constants.ResourceType.CONTROLLER): controller_metrics = self.rest_handler.get_controllers_metrics( storage_id, resource_metrics.get(constants.ResourceType.CONTROLLER), start_time, end_time) 
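# Each resource type above is collected independently and folded into
# the flat 'metrics' list; no per-resource exception handling exists
# here, so a failure in any one collector aborts the whole collection
# pass.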
metrics.extend(controller_metrics) LOG.info('The system(storage_id: %s) stop to collect controller' ' performance, The length is: %s', storage_id, len(controller_metrics)) if resource_metrics.get(constants.ResourceType.PORT): fc_port_metrics = self.rest_handler.get_fc_port_metrics( storage_id, resource_metrics.get(constants.ResourceType.PORT), start_time, end_time) metrics.extend(fc_port_metrics) LOG.info('The system(storage_id: %s) stop to collect port' ' performance, The length is: %s', storage_id, len(fc_port_metrics)) return metrics @staticmethod def get_capabilities(context, filters=None): return { 'is_historic': True, 'resource_metrics': { constants.ResourceType.STORAGE: consts.STORAGE_CAP, constants.ResourceType.STORAGE_POOL: consts.STORAGE_POOL_CAP, constants.ResourceType.VOLUME: consts.VOLUME_CAP, constants.ResourceType.CONTROLLER: consts.CONTROLLER_CAP, constants.ResourceType.PORT: consts.PORT_CAP } } def get_latest_perf_timestamp(self, context): return self.rest_handler.get_system_time() def list_storage_host_initiators(self, context): return self.rest_handler.list_storage_host_initiators(self.storage_id) def list_storage_hosts(self, context): return self.rest_handler.list_storage_hosts(self.storage_id) def list_storage_host_groups(self, context): return self.rest_handler.list_storage_host_groups(self.storage_id) def list_volume_groups(self, context): return self.rest_handler.list_volume_groups(self.storage_id) def list_masking_views(self, context): return self.rest_handler.list_masking_views(self.storage_id) ================================================ FILE: delfin/drivers/dell_emc/power_store/rest_handler.py ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
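PowerStoreDriver.get_capabilities above advertises resource_metrics in exactly the shape collect_perf_metrics consumes: a mapping from resource type to the metric tables defined in consts.py. A hedged sketch of the round trip (the driver instance, storage_id, and the epoch-millisecond time window are assumptions, not shown in this file):

from delfin.drivers.dell_emc.power_store.power_store import PowerStoreDriver

caps = PowerStoreDriver.get_capabilities(context=None)
wanted = caps['resource_metrics']   # {ResourceType.STORAGE: STORAGE_CAP, ...}
# metrics = driver.collect_perf_metrics(ctx, storage_id, wanted,
#                                       start_time_ms, end_time_ms)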
import datetime import hashlib import threading import time from decimal import Decimal import requests import six from oslo_log import log as logging from oslo_utils import units from delfin import exception, utils, cryptor from delfin.common import constants from delfin.drivers.dell_emc.power_store import consts from delfin.drivers.utils.rest_client import RestClient from delfin.i18n import _ LOG = logging.getLogger(__name__) class RestHandler(RestClient): REST_LOGIN_SESSION_URL = '/api/rest/login_session' REST_LOGOUT_URL = '/api/rest/logout' REST_CLUSTER_URL = \ '/api/rest/cluster?select=name,compatibility_level,global_id,state,' \ 'primary_appliance_id,id,system_time&limit=2000&offset={}' REST_APPLIANCE_URL = '/api/rest/appliance?select=id,name,model' \ '&limit=2000&offset={}' REST_SOFTWARE_INSTALLED_URL = \ '/api/rest/software_installed?select=id,release_version,' \ 'build_version,appliance&limit=2000&offset={}' REST_VOLUME_URL = '/api/rest/volume?select=id,name,description,state,' \ 'type,wwn,size,appliance_id&limit=2000&offset={}' REST_GENERATE_URL = '/api/rest/metrics/generate' REST_FC_PORT_URL = \ '/api/rest/fc_port?select=appliance_id,current_speed,id,is_link_up,' \ 'name,partner_id,supported_speeds,wwn,node_id,sfp_id' \ '&limit=2000&offset={}' REST_ETH_PORT_URL = \ '/api/rest/eth_port?select=appliance_id,current_speed,id,is_link_up,' \ 'name,partner_id,supported_speeds,mac_address,node_id,sfp_id' \ '&limit=2000&offset={}' REST_SAS_PORT_URL = \ '/api/rest/sas_port?select=appliance_id,current_speed,id,' \ 'is_link_up,name,node_id,speed,sfp_id&limit=2000&offset={}' REST_HARDWARE_URL = \ '/api/rest/hardware?select=name,extra_details,id,lifecycle_state,' \ 'serial_number,slot,type,appliance_id,status_led_state' \ '&limit=2000&offset={}' REST_NODE_URL = \ '/api/rest/node?select=appliance_id,id,slot&limit=2000&offset={}' REST_ALERT_URL = \ '/api/rest/alert?select=id,description_l10n,severity,resource_name,' \ 'resource_type,raised_timestamp,state,event_code,resource_id' \ '&limit=2000&offset={}' REST_SNMP_ALERT_URL = \ '/api/rest/alert?select=id,description_l10n,severity,resource_name,' \ 'resource_type,raised_timestamp,state&limit=2000&offset=0' \ '&description_l10n=in.({})&snmp_sent_timestamp=not.is.null' \ '&order=snmp_sent_timestamp' REST_INITIATOR_URL = '/api/rest/initiator?select=id,port_name,port_type,' \ 'host_id&limit=2000&offset={}' REST_HOST_URL = '/api/rest/host?select=id,name,host_initiators,os_type,' \ 'description&limit=2000&offset={}' REST_HOST_GROUP_URL = '/api/rest/host_group?select=id,name,hosts,' \ 'description&limit=2000&offset={}' REST_VOLUME_GROUP_URL = '/api/rest/volume_group?select=description,name,' \ 'id,volumes&limit=2000&offset={}' REST_HOST_VOLUME_MAPPING_URL = \ '/api/rest/host_volume_mapping?select=host_group_id,host_id,id,' \ 'volume_id&limit=2000&offset={}' REST_IP_POOL_ADDRESS_URL = \ '/api/rest/ip_pool_address?select=id,name,address,appliance_id,' \ 'node_id,purposes&limit=2000&offset={}' REST_METRICS_ARCHIVE_URL = '/api/rest/metrics_archive' REST_FILE_SYSTEM_URL = '/api/rest/file_system' REST_FILE_TREE_QUOTA_URL = '/api/rest/file_tree_quota' REST_SMB_SHARE_URL = '/api/rest/smb_share' REST_NFS_SERVER_URL = '/api/rest/nfs_server' REST_FILE_USER_QUOTA_URL = '/api/rest/file_user_quota' AUTH_KEY = 'DELL-EMC-TOKEN' def __init__(self, **kwargs): super(RestHandler, self).__init__(**kwargs) rest_access = kwargs.get('rest') self.username = rest_access.get('username') self.session_lock = threading.Lock() def login(self): try: with self.session_lock: if 
self.session is None: self.init_http_head() self.session.auth = requests.auth.HTTPBasicAuth( self.rest_username, cryptor.decode(self.rest_password)) res = self.call_with_token(RestHandler.REST_LOGIN_SESSION_URL) if res.status_code == 200 or res.status_code == 206: self.session.headers[RestHandler.AUTH_KEY] = \ cryptor.encode(res.headers[RestHandler.AUTH_KEY]) else: LOG.error("Login error.URL: %s,Reason: %s.", RestHandler.REST_LOGIN_SESSION_URL, res.text) if 'Unauthorized' in res.text: raise exception.InvalidUsernameOrPassword() elif 'Forbidden' in res.text: raise exception.InvalidIpOrPort() else: raise exception.StorageBackendException( six.text_type(res.text)) except Exception as e: LOG.error("Login error: %s", six.text_type(e)) raise e def call_with_token(self, url, data=None, method='GET', calltimeout=consts.DEFAULT_TIMEOUT): auth_key = None if self.session: auth_key = self.session.headers.get(RestHandler.AUTH_KEY, None) if auth_key: self.session.headers[RestHandler.AUTH_KEY] \ = cryptor.decode(auth_key) res = self.do_call(url, data, method, calltimeout) if auth_key: self.session.headers[RestHandler.AUTH_KEY] = auth_key return res def logout(self): res = self.call_with_token(RestHandler.REST_LOGOUT_URL, None, 'POST') if res.status_code != consts.StatusCode.SUCCESS_NO_CONTENT and \ res.status_code != consts.StatusCode.SUCCESS_CREATE_RESPONSE: LOG.error("logout error.URL: %s,Reason: %s.", RestHandler.REST_LOGOUT_URL, res.text) raise exception.StorageBackendException(six.text_type(res.text)) def rest_call(self, url, data=None, method='GET', offset=0, result=None, count=0): if result is None: result = [] if '{}' in url: res = self.call_with_token(url.format(offset), data, method) else: res = self.call_with_token(url, data, method) if res.status_code == consts.StatusCode.SUCCESS: result.extend(res.json()) elif res.status_code == consts.StatusCode.PARTIAL_CONTENT: result.extend(res.json()) if len(res.json()) == consts.LIMIT_COUNT: offset += consts.LIMIT_COUNT self.rest_call(url, data, method, offset, result, count) elif res.status_code == consts.StatusCode.UNAUTHORIZED or \ res.status_code == consts.StatusCode.FORBIDDEN: if count < consts.DigitalConstant.THREE: self.login() count = count + consts.DigitalConstant.ONE self.rest_call(url, data, method, offset, result, count) return result def get_storage(self, storage_id): clusters = self.rest_call(self.REST_CLUSTER_URL) if not clusters: LOG.error('The cluster data is empty') raise exception.StorageBackendException( 'The cluster data is empty') cluster = clusters[consts.DigitalConstant.ZERO] appliance_id = cluster.get('primary_appliance_id') appliances = self.rest_call(self.REST_APPLIANCE_URL) model = '' for appliance in appliances: if appliance_id == appliance.get('id'): model = appliance.get('model') pools = self.get_storage_pools(storage_id) total_capacity = consts.DigitalConstant.ZERO used_capacity = consts.DigitalConstant.ZERO for pool in pools: total_capacity += pool.get('total_capacity') used_capacity += pool.get('used_capacity') disks = self.get_disks(storage_id) storage_result = { 'model': model, 'total_capacity': total_capacity, 'raw_capacity': sum(disk.get('capacity') for disk in disks), 'used_capacity': used_capacity, 'free_capacity': total_capacity - used_capacity, 'vendor': 'DELL EMC', 'name': cluster.get('name'), 'serial_number': cluster.get('global_id'), 'firmware_version': self.get_firmware_version(appliance_id), 'status': consts.STORAGE_STATUS_MAP.get( cluster.get('state'), constants.StorageStatus.UNKNOWN) } return 
storage_result def get_firmware_version(self, appliance_id): software_s = self.rest_call(RestHandler.REST_SOFTWARE_INSTALLED_URL) for software in software_s: appliance_d = software.get('appliance') if not appliance_d: continue software_appliance_id = appliance_d.get('id') if appliance_id == software_appliance_id: return software.get('release_version') def get_storage_pools(self, storage_id): list_pool = [] appliances = self.rest_call(RestHandler.REST_APPLIANCE_URL) for appliance in appliances: appliance_id = appliance.get('id') data = {'entity': consts.SPACE_METRICS_BY_APPLIANCE, 'entity_id': appliance_id} appliance_spaces = self.rest_call(self.REST_GENERATE_URL, data, 'POST') if not appliance_spaces: LOG.error('The pools space data is empty') raise exception.StorageBackendException( 'The pools space data is empty') appliance_space = \ appliance_spaces[consts.DigitalConstant.MINUS_ONE] total_capacity = appliance_space.get('physical_total') used_capacity = appliance_space.get('physical_used') pool_result = { 'name': appliance.get('name'), 'storage_id': storage_id, 'native_storage_pool_id': appliance_id, 'status': constants.StoragePoolStatus.NORMAL, 'storage_type': constants.StorageType.BLOCK, 'total_capacity': total_capacity, 'used_capacity': used_capacity, 'free_capacity': total_capacity - used_capacity } list_pool.append(pool_result) return list_pool def get_volumes(self, storage_id): list_volume = [] volumes = self.rest_call(self.REST_VOLUME_URL) for volume in volumes: snapshot_type = volume.get('type') if consts.CHARACTER_SNAPSHOT == snapshot_type: continue volume_type = consts.VOLUME_TYPE_MAP.get( snapshot_type, constants.VolumeType.THIN) status = consts.VOLUME_STATUS_MAP.get( volume.get('state'), constants.StorageStatus.UNKNOWN) volume_id = volume.get('id') total_capacity = volume.get('size') used_capacity = self.get_volume_used_capacity( volume_id, volume_type, total_capacity) volume_result = { 'name': volume.get('name'), 'storage_id': storage_id, 'description': volume.get('description'), 'status': status, 'native_volume_id': volume_id, 'native_storage_pool_id': volume.get('appliance_id'), 'wwn': volume.get('wwn') if volume.get('wwn') else '', 'type': volume_type, 'total_capacity': total_capacity, 'used_capacity': used_capacity, 'free_capacity': total_capacity - used_capacity } list_volume.append(volume_result) return list_volume def get_volume_used_capacity(self, volume_id, volume_type, used_capacity): if volume_type == constants.VolumeType.THICK: return used_capacity data = {'entity': consts.SPACE_METRICS_BY_VOLUME, 'entity_id': volume_id} volumes_spaces = self.rest_call(self.REST_GENERATE_URL, data, 'POST') if volumes_spaces: volumes_space = \ volumes_spaces[consts.DigitalConstant.MINUS_ONE] used_capacity = volumes_space.get('logical_used') return used_capacity def get_disks(self, storage_id): disk_list = [] hardware_list = self.rest_call(self.REST_HARDWARE_URL) for hardware in hardware_list: lifecycle_state = hardware.get('lifecycle_state') if consts.CHARACTER_DRIVE != hardware.get('type') or \ lifecycle_state == consts.CHARACTER_EMPTY: continue extra_details = hardware.get('extra_details') capacity = None firmware = '' physical_type = constants.DiskPhysicalType.UNKNOWN if extra_details: firmware = extra_details.get('firmware_version') drive_type = extra_details.get('drive_type') if drive_type in consts.DiskType.ALL: continue physical_type = consts.DISK_PHYSICAL_TYPE.get( drive_type, constants.DiskPhysicalType.UNKNOWN) capacity = extra_details.get('size') hardware_name = 
hardware.get('name') if not capacity: LOG.warning("disk capacity is null: %s", hardware_name) continue disk_result = { 'name': hardware_name, 'storage_id': storage_id, 'native_disk_id': hardware.get('id'), 'serial_number': hardware.get('serial_number'), 'manufacturer': 'DELL EMC', 'firmware': firmware, 'capacity': capacity, 'status': consts.DISK_STATUS_MAP.get( lifecycle_state, constants.DiskStatus.NORMAL), 'physical_type': physical_type, 'logical_type': constants.DiskLogicalType.UNKNOWN, 'location': str(hardware.get('slot')) } disk_list.append(disk_result) return disk_list def get_controllers(self, storage_id): list_controllers = [] nodes = self.get_node() ips = self.get_ip() hardware_list = self.rest_call(self.REST_HARDWARE_URL) for hardware in hardware_list: lifecycle_state = hardware.get('lifecycle_state') if consts.CHARACTER_NODE != hardware.get('type') or \ lifecycle_state == consts.CHARACTER_EMPTY: continue slot = hardware.get('slot') appliance_id = hardware.get('appliance_id') node_id = nodes.get(f'{appliance_id}{slot}') address = ips.get(f'{appliance_id}{node_id}') if not address: LOG.warning('mgmt_ip is empty,' ' Exceptions may occur in snmptrap') extra_details = hardware.get('extra_details') memory_size = '' cpu_info = '' if extra_details: memory_size = extra_details.get( 'physical_memory_size_gb', 0) * units.Gi cpu_info = extra_details.get('cpu_model') full_name = hardware.get('name') if full_name: name = full_name.split('-')[ consts.DigitalConstant.MINUS_ONE] else: LOG.warning('The name of hardware is empty') continue controller_result = { 'name': name, 'storage_id': storage_id, 'native_controller_id': hardware.get('id'), 'status': consts.CONTROLLER_STATUS_MAP.get( lifecycle_state, constants.ControllerStatus.UNKNOWN), 'location': f'{name}:Slot-{slot}', 'cpu_info': cpu_info, 'cpu_count': consts.DigitalConstant.ONE, 'memory_size': memory_size, 'mgmt_ip': address } list_controllers.append(controller_result) return list_controllers def get_node(self): node_dict = {} nodes = self.rest_call(self.REST_NODE_URL) for node in nodes: appliance_id = node.get('appliance_id') slot = node.get('slot') node_id = node.get('id') node_dict[f'{appliance_id}{slot}'] = node_id return node_dict def get_ip(self): ip_dict = {} ip_pool_address = self.rest_call(self.REST_IP_POOL_ADDRESS_URL) for ip_address in ip_pool_address: purposes_list = ip_address.get('purposes') if consts.MGMT_NODE_COREOS not in purposes_list: continue address = ip_address.get('address') appliance_id = ip_address.get('appliance_id') node_id = ip_address.get('node_id') ip_dict[f'{appliance_id}{node_id}'] = address return ip_dict def get_appliance_name(self): appliance_name = {} appliances = self.rest_call(self.REST_APPLIANCE_URL) for appliance in appliances: appliance_name[appliance.get('id')] = appliance.get('name') return appliance_name def get_port_hardware(self): hardware_dict = {} hardware_list = self.rest_call(self.REST_HARDWARE_URL) for hardware in hardware_list: hardware_dict[hardware.get('id')] = hardware return hardware_dict def get_fc_ports(self, storage_id, hardware_dict, appliance_name_dict): list_fc_ports = [] fc_res = self.rest_call(self.REST_FC_PORT_URL) for fc in fc_res: appliance_id = fc.get('appliance_id') name = fc.get('name') is_link_up = fc.get('is_link_up') connection_status = consts.PORT_CONNECTION_STATUS_MAP.get( is_link_up, constants.PortConnectionStatus.UNKNOWN) lifecycle_state = hardware_dict.get( fc.get('sfp_id'), {}).get('lifecycle_state') health_status = consts.PORT_HEALTH_STATUS_MAP.get( 
lifecycle_state, constants.PortHealthStatus.UNKNOWN) fc_port_result = { 'name': name, 'storage_id': storage_id, 'native_port_id': fc.get('id'), 'location': f'{appliance_name_dict.get(appliance_id)}:{name}', 'connection_status': connection_status, 'health_status': health_status, 'type': constants.PortType.FC, 'speed': self.convert_speed(fc.get('current_speed')), 'max_speed': self.convert_speed(fc.get('supported_speeds')), 'native_parent_id': fc.get('node_id'), 'wwn': fc.get('wwn') } list_fc_ports.append(fc_port_result) return list_fc_ports @staticmethod def convert_speed(supported_speeds): if not supported_speeds: return supported_speed = \ supported_speeds[consts.DigitalConstant.MINUS_ONE]\ if isinstance(supported_speeds, list) else supported_speeds if '_Gbps' in supported_speed: supported_speed = supported_speed.replace('_Gbps', '') return int(supported_speed) * units.G if '_Mbps' in supported_speed: supported_speed = supported_speed.replace('_Mbps', '') return int(supported_speed) * units.M if '_Kbps' in supported_speed: supported_speed = supported_speed.replace('_Kbps', '') return int(supported_speed) * units.k def get_eth_ports(self, storage_id, hardware_dict, appliance_name_dict): list_eth_ports = [] eth_ports = self.rest_call(self.REST_ETH_PORT_URL) for eth in eth_ports: name = eth.get('name') appliance_id = eth.get('appliance_id') is_link_up = eth.get('is_link_up') connection_status = consts.PORT_CONNECTION_STATUS_MAP.get( is_link_up, constants.PortConnectionStatus.UNKNOWN) lifecycle_state = hardware_dict.get( eth.get('sfp_id'), {}).get('lifecycle_state') health_status = consts.PORT_HEALTH_STATUS_MAP.get( lifecycle_state, constants.PortHealthStatus.UNKNOWN) eth_port_result = { 'name': name, 'storage_id': storage_id, 'native_port_id': eth.get('id'), 'location': f'{appliance_name_dict.get(appliance_id)}:{name}', 'connection_status': connection_status, 'health_status': health_status, 'type': constants.PortType.ETH, 'speed': self.convert_speed(eth.get('current_speed')), 'max_speed': self.convert_speed(eth.get('supported_speeds')), 'native_parent_id': eth.get('node_id'), 'mac_address': eth.get('mac_address') } list_eth_ports.append(eth_port_result) return list_eth_ports def get_sas_ports(self, storage_id, hardware_dict, appliance_name_dict): list_sas_ports = [] sas_ports = self.rest_call(self.REST_SAS_PORT_URL) for sas in sas_ports: name = sas.get('name') appliance_id = sas.get('appliance_id') is_link_up = sas.get('is_link_up') connection_status = consts.PORT_CONNECTION_STATUS_MAP.get( is_link_up, constants.PortConnectionStatus.UNKNOWN) lifecycle_state = hardware_dict.get( sas.get('sfp_id'), {}).get('lifecycle_state') health_status = consts.PORT_HEALTH_STATUS_MAP.get( lifecycle_state, constants.PortHealthStatus.UNKNOWN) sas_port_result = { 'name': name, 'storage_id': storage_id, 'native_port_id': sas.get('id'), 'location': f'{appliance_name_dict.get(appliance_id)}:{name}', 'connection_status': connection_status, 'health_status': health_status, 'type': constants.PortType.SAS, 'speed': self.convert_speed(sas.get('speed')), 'native_parent_id': sas.get('node_id') } list_sas_ports.append(sas_port_result) return list_sas_ports def list_alerts(self, query_para=None): alerts = self.rest_call(self.REST_ALERT_URL) alerts_list = [] for alert in alerts: if 'CLEARED' == alert.get('state'): continue raised_timestamp = alert.get('raised_timestamp') time_difference = self.get_time_difference() timestamp_s = datetime.datetime.strptime( raised_timestamp, consts.UTC_FORMAT).timestamp() timestamp = 
int((timestamp_s + time_difference) * units.k) if\ raised_timestamp else None if query_para: try: if timestamp is None or timestamp \ < int(query_para.get('begin_time')) or \ timestamp > int(query_para.get('end_time')): continue except Exception as e: LOG.error(e) alerts_model = self.set_alert_model(alert, timestamp) alerts_list.append(alerts_model) return alerts_list @staticmethod def get_time_difference(): time_difference = time.mktime( time.localtime()) - time.mktime(time.gmtime()) return time_difference @staticmethod def get_parse_alerts(snmp_alert): try: if consts.PARSE_ALERT_DESCRIPTION in snmp_alert.keys(): description = snmp_alert.get(consts.PARSE_ALERT_DESCRIPTION) raised_time = snmp_alert.get(consts.PARSE_ALERT_TIME_UTC) timestamp = None if raised_time: time_difference = RestHandler.get_time_difference() timestamp_s = datetime.datetime.strptime( raised_time, consts.SYSTEM_TIME_FORMAT).timestamp() timestamp = int((timestamp_s + time_difference) * units.k) resource_type = snmp_alert.get( consts.PARSE_ALERT_RESOURCE_TYPE) resource_name = snmp_alert.get( consts.PARSE_ALERT_RESOURCE_NAME) location = f'{resource_type}:{resource_name}' event_code = snmp_alert.get(consts.PARSE_ALERT_CODE) resource_id = snmp_alert.get(consts.PARSE_ALERT_RESOURCE_ID) match_key_str = f'{description}{timestamp}{resource_type}' \ f'{resource_name}{event_code}{resource_id}' match_key = hashlib.md5(match_key_str.encode()).hexdigest() alerts_model = { 'alert_id': match_key, 'occur_time': timestamp if timestamp else utils.utcnow_ms(), 'severity': consts.SNMP_SEVERITY_MAP.get( snmp_alert.get(consts.PARSE_ALERT_SEVERITY), constants.Severity.NOT_SPECIFIED), 'category': constants.Category.FAULT, 'location': location if resource_type and resource_name else '', 'type': constants.EventType.EQUIPMENT_ALARM, 'resource_type': resource_type if resource_type else constants.DEFAULT_RESOURCE_TYPE, 'alert_name': description, 'match_key': match_key, 'description': description } return alerts_model except Exception as e: LOG.error(e) msg = (_("Failed to build alert model as some attributes missing")) raise exception.InvalidResults(msg) def get_alert_sources(self, storage_id): sources_list = [] controllers = self.get_controllers(storage_id) for controller in controllers: mgmt_ip = controller.get('mgmt_ip') mgmt_ip_t = {'host': mgmt_ip} sources_list.append(mgmt_ip_t) return sources_list @staticmethod def set_alert_model(alert, timestamp): description = alert.get('description_l10n') resource_type = alert.get('resource_type') resource_name = alert.get('resource_name') resource_id = alert.get('resource_id') event_code = alert.get('event_code') match_key_str = f'{description}{timestamp}{resource_type}' \ f'{resource_name}{event_code}{resource_id}' alerts_model = { 'alert_id': alert.get('id'), 'occur_time': timestamp, 'severity': consts.ALERT_SEVERITY_MAP.get( alert.get('severity'), constants.Severity.NOT_SPECIFIED), 'category': constants.Category.FAULT, 'location': f'{resource_type}:{resource_name}', 'type': constants.EventType.EQUIPMENT_ALARM, 'resource_type': resource_type, 'alert_name': description, 'match_key': hashlib.md5(match_key_str.encode()).hexdigest(), 'description': description } return alerts_model def list_storage_host_initiators(self, storage_id): list_initiators = self.get_initiators(storage_id) if list_initiators: return list_initiators hosts = self.rest_call(self.REST_HOST_URL) for host in hosts: initiators = host.get('host_initiators') for initiator in (initiators or []): port_name = initiator.get('port_name') 
initiator_dict = { 'native_storage_host_initiator_id': port_name, 'native_storage_host_id': host.get('id'), 'name': port_name, 'type': consts.INITIATOR_TYPE_MAP.get( initiator.get('port_type'), constants.InitiatorType.UNKNOWN), 'status': constants.InitiatorStatus.UNKNOWN, 'wwn': port_name, 'storage_id': storage_id } list_initiators.append(initiator_dict) return list_initiators def get_initiators(self, storage_id): list_initiators = [] try: initiators = self.rest_call(self.REST_INITIATOR_URL) for initiator in initiators: port_name = initiator.get('port_name') initiator_dict = { 'native_storage_host_initiator_id': initiator.get('id'), 'native_storage_host_id': initiator.get('host_id'), 'name': port_name, 'type': consts.INITIATOR_TYPE_MAP.get( initiator.get('port_type'), constants.InitiatorType.UNKNOWN), 'status': constants.InitiatorStatus.UNKNOWN, 'wwn': port_name, 'storage_id': storage_id } list_initiators.append(initiator_dict) except Exception as e: LOG.error("get initiators error: %s", six.text_type(e)) return list_initiators def list_storage_hosts(self, storage_id): host_list = [] hosts = self.rest_call(self.REST_HOST_URL) for host in hosts: h = { "name": host.get('name'), "storage_id": storage_id, "native_storage_host_id": host.get('id'), 'description': host.get('description') if host.get('description') else '', "os_type": consts.HOST_OS_TYPES_MAP.get( host.get('os_type'), constants.HostOSTypes.UNKNOWN), "status": constants.HostStatus.NORMAL } host_list.append(h) return host_list def list_storage_host_groups(self, storage_id): host_groups = self.rest_call(self.REST_HOST_GROUP_URL) host_group_list = [] storage_host_grp_relation_list = [] for hgroup in (host_groups or []): hgroup_id = hgroup.get('id') hg = { 'native_storage_host_group_id': hgroup_id, 'name': hgroup.get('name'), 'description': hgroup.get('description') if hgroup.get('description') else '', 'storage_id': storage_id } host_group_list.append(hg) for host in (hgroup.get('hosts') or []): host_relation = { 'native_storage_host_group_id': hgroup_id, 'storage_id': storage_id, 'native_storage_host_id': host.get('id') } storage_host_grp_relation_list.append(host_relation) result = { 'storage_host_groups': host_group_list, 'storage_host_grp_host_rels': storage_host_grp_relation_list } return result def list_volume_groups(self, storage_id): volume_groups = self.rest_call(self.REST_VOLUME_GROUP_URL) vol_group_list = [] vol_grp_vol_relation_list = [] for volume_group in volume_groups: volume_group_id = volume_group.get('id') vol_g = { 'name': volume_group.get('name'), 'storage_id': storage_id, 'native_volume_group_id': volume_group_id, 'description': volume_group.get('description') if volume_group.get('description') else '' } vol_group_list.append(vol_g) for volumes in (volume_group.get('volumes') or []): volume_group_relation = { 'storage_id': storage_id, 'native_volume_group_id': volume_group_id, 'native_volume_id': volumes.get('id') } vol_grp_vol_relation_list.append(volume_group_relation) result = { 'volume_groups': vol_group_list, 'vol_grp_vol_rels': vol_grp_vol_relation_list } return result def list_masking_views(self, storage_id): list_masking_views = [] volume_mapping = self.rest_call(self.REST_HOST_VOLUME_MAPPING_URL) for mapping in volume_mapping: native_masking_view_id = mapping.get('id') host_group_id = mapping.get('host_group_id') host_id = mapping.get('host_id') view = { 'native_masking_view_id': native_masking_view_id, 'name': native_masking_view_id, 'native_volume_id': mapping.get('volume_id'), 'storage_id': storage_id 
} if host_group_id: view['native_storage_host_group_id'] = host_group_id if host_id: view['native_storage_host_id'] = host_id list_masking_views.append(view) return list_masking_views def get_storage_metrics(self, storage_id, resource_metrics, start_time, end_time): storage_metrics = [] clusters = self.rest_call(self.REST_CLUSTER_URL) if not clusters: return storage_metrics cluster = clusters[consts.DigitalConstant.ZERO] cluster_id = cluster.get('id') cluster_name = cluster.get('name') if not cluster_id or not cluster_name: return storage_metrics data = {'entity': consts.PERFORMANCE_METRICS_BY_CLUSTER, 'entity_id': cluster_id, 'interval': consts.PERFORMANCE_METRICS_INTERVAL} packaging_data = self.package_data(data, end_time, start_time) storage_metrics = self.set_metrics_data( cluster.get('global_id'), cluster_name, packaging_data, resource_metrics, constants.ResourceType.STORAGE, storage_id) return storage_metrics def get_pool_metrics(self, storage_id, resource_metrics, start_time, end_time): pool_metrics_list = [] appliances = self.rest_call(self.REST_APPLIANCE_URL) for appliance in appliances: pool_id = appliance.get('id') pool_name = appliance.get('name') if not pool_id or not pool_name: continue data = {'entity': consts.PERFORMANCE_METRICS_BY_APPLIANCE, 'entity_id': pool_id, 'interval': consts.PERFORMANCE_METRICS_INTERVAL} packaging_data = self.package_data(data, end_time, start_time) pool_metrics = self.set_metrics_data( pool_id, pool_name, packaging_data, resource_metrics, constants.ResourceType.STORAGE_POOL, storage_id) pool_metrics_list.extend(pool_metrics) return pool_metrics_list def get_volume_metrics(self, storage_id, resource_metrics, start_time, end_time): volume_metrics_list = [] volumes = self.rest_call(self.REST_VOLUME_URL) for volume in volumes: volume_id = volume.get('id') volume_name = volume.get('name') if not volume_id or not volume_name: continue data = {'entity': consts.PERFORMANCE_METRICS_BY_VOLUME, 'entity_id': volume_id, 'interval': consts.PERFORMANCE_METRICS_INTERVAL} packaging_data = self.package_data(data, end_time, start_time) volume_metrics = self.set_metrics_data( volume_id, volume_name, packaging_data, resource_metrics, constants.ResourceType.VOLUME, storage_id) volume_metrics_list.extend(volume_metrics) return volume_metrics_list def get_controllers_metrics(self, storage_id, resource_metrics, start_time, end_time): controllers_metrics_list = [] controller_dict = self.get_node_hardware() controllers = self.rest_call(self.REST_NODE_URL) for controller in controllers: controller_id = controller.get('id') if not controller_id: continue hardware_id, hardware_name = self.get_resource(controller, controller_dict) if not hardware_id: LOG.info('controllers performance: Unexpected data') continue data = {'entity': consts.PERFORMANCE_METRICS_BY_NODE, 'entity_id': controller_id, 'interval': consts.PERFORMANCE_METRICS_INTERVAL} packaging_data = self.package_data(data, end_time, start_time) controllers_metrics = self.set_metrics_data( hardware_id, hardware_name, packaging_data, resource_metrics, constants.ResourceType.CONTROLLER, storage_id) controllers_metrics_list.extend(controllers_metrics) return controllers_metrics_list @staticmethod def get_resource(controller, controller_dict): appliance_id = controller.get('appliance_id') slot = controller.get('slot') hardware = controller_dict.get(f'{appliance_id}{slot}', {}) hardware_id = hardware.get('id') full_name = hardware.get('name') if full_name: hardware_name = full_name.split('-')[ consts.DigitalConstant.MINUS_ONE] 
else: hardware_name = hardware_id return hardware_id, hardware_name def get_node_hardware(self): hardware_dict = {} hardware_list = self.rest_call(self.REST_HARDWARE_URL) for hardware in hardware_list: lifecycle_state = hardware.get('lifecycle_state') if consts.CHARACTER_NODE != hardware.get('type') or \ lifecycle_state == consts.CHARACTER_EMPTY: continue slot = hardware.get('slot') appliance_id = hardware.get('appliance_id') key = f'{appliance_id}{slot}' hardware_dict[key] = hardware return hardware_dict def get_fc_port_metrics(self, storage_id, resource_metrics, start_time, end_time): fc_port_metrics_list = [] fc_ports = self.rest_call(self.REST_FC_PORT_URL) for fc_port in fc_ports: fc_port_id = fc_port.get('id') fc_port_name = fc_port.get('name') if not fc_port_id or not fc_port_name: continue data = {'entity': consts.PERFORMANCE_METRICS_BY_FE_FC_PORT, 'entity_id': fc_port_id, 'interval': consts.PERFORMANCE_METRICS_INTERVAL} packaging_data = self.package_data(data, end_time, start_time) fc_port_metrics = self.set_metrics_data( fc_port_id, fc_port_name, packaging_data, resource_metrics, constants.ResourceType.PORT, storage_id) fc_port_metrics_list.extend(fc_port_metrics) return fc_port_metrics_list @staticmethod def set_metrics_data(resource_id, resource_name, packaging_data, resource_metrics, resource_type, storage_id): metrics_list = [] for resource_key in resource_metrics.keys(): labels = { 'storage_id': storage_id, 'resource_type': resource_type, 'resource_id': resource_id, 'resource_name': resource_name, 'type': 'RAW', 'unit': resource_metrics[resource_key]['unit'] } resource_value = {} for about_timestamp in packaging_data.keys(): metrics_data = packaging_data.get(about_timestamp) resource_value[about_timestamp] = \ metrics_data.get(resource_key) if resource_value: metrics_res = constants.metric_struct( name=resource_key, labels=labels, values=resource_value) metrics_list.append(metrics_res) return metrics_list def package_data(self, data, end_time, start_time): perf_data = self.rest_call(self.REST_GENERATE_URL, data, 'POST') packaging_data = {} duplicate = set() for perf in perf_data: timestamp = perf.get('timestamp') time_difference = self.get_time_difference() timestamp_s = int( datetime.datetime.strptime(timestamp, consts.PERF_TIME_FORMAT) .timestamp() + time_difference) repeat_count = perf.get('repeat_count') if repeat_count > consts.DigitalConstant.ONE: repeat_timestamp_s =\ (repeat_count - consts.DigitalConstant.ONE)\ * consts.PERF_INTERVAL count_timestamp_s = timestamp_s + repeat_timestamp_s count_timestamp_ms = count_timestamp_s * units.k if start_time > count_timestamp_ms: continue for count in range(consts.DigitalConstant.ZERO, repeat_count): count_timestamp_s = timestamp_s + count * consts.PERF_INTERVAL count_timestamp_ms = count_timestamp_s * units.k about_timestamp = \ int(count_timestamp_s / consts.DigitalConstant.SIXTY) \ * consts.DigitalConstant.SIXTY * units.k if count_timestamp_ms < start_time or \ count_timestamp_ms >= end_time \ or about_timestamp in duplicate: continue duplicate.add(about_timestamp) cpu_utilization = perf.get('io_workload_cpu_utilization') metrics_d = { 'iops': Decimal(str(perf.get('total_iops'))).quantize( Decimal('0'), rounding="ROUND_HALF_UP"), "readIops": Decimal(str(perf.get('read_iops'))).quantize( Decimal('0'), rounding="ROUND_HALF_UP"), "writeIops": Decimal(str(perf.get('write_iops'))).quantize( Decimal('0'), rounding="ROUND_HALF_UP"), "throughput": round( perf.get('total_bandwidth') / units.Mi, 3), "readThroughput": round( 
perf.get('read_bandwidth') / units.Mi, 3), "writeThroughput": round( perf.get('write_bandwidth') / units.Mi, 3), "responseTime": round( perf.get('avg_latency') / units.k, 3), "readResponseTime": round( perf.get('avg_read_latency') / units.k, 3), "writeResponseTime": round( perf.get('avg_write_latency') / units.k, 3), "ioSize": round(perf.get('avg_io_size') / units.Ki, 3), "readIoSize": round( perf.get('avg_read_size') / units.Ki, 3), "writeIoSize": round( perf.get('avg_write_size') / units.Ki, 3), "cpuUsage": Decimal(str(cpu_utilization)).quantize( Decimal('0.000'), rounding="ROUND_HALF_UP") if cpu_utilization else '', 'time': about_timestamp } packaging_data[about_timestamp] = metrics_d return packaging_data def get_system_time(self): clusters = self.rest_call(self.REST_CLUSTER_URL) if clusters: cluster = clusters[consts.DigitalConstant.ZERO] system_time = cluster.get('system_time') time_difference = self.get_time_difference() timestamp_s = datetime.datetime.strptime( system_time, consts.SYSTEM_TIME_FORMAT).timestamp() timestamp = int((timestamp_s + time_difference) * units.k)\ if system_time else None return timestamp ================================================ FILE: delfin/drivers/dell_emc/scaleio/__init__.py ================================================ ================================================ FILE: delfin/drivers/dell_emc/scaleio/alert_consts.py ================================================ # Copyright 2022 The SODA Authors. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
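# --- Illustrative sketch, not part of the original module ---
# rest_handler.list_alerts() resolves a raw ScaleIO 'alertType' code to a
# readable description by consulting ALERT_MAP below, falling back to a
# lower-cased, de-underscored form of the code. The helper name here is
# hypothetical; it only mirrors that lookup:


def describe_alert_type(alert_type):
    # e.g. 'DEVICE_FAILED' -> 'Device failed' (mapped),
    #      'SOME_NEW_CODE' -> 'some new code' (fallback)
    fallback = alert_type.lower().replace('_', ' ')
    return ALERT_MAP.get(alert_type, fallback)
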
ALERT_MAP = {
    'DEVICE_FAILED': 'Device failed',
    'SDC_DISCONNECTED': 'SDC disconnected',
    'MDM_NOT_CLUSTERED': 'MDM is not clustered',
    'SDS_DISCONNECTED': 'SDS is disconnected',
    'SDS_DISCONNECTS_FREQUENTLY': 'SDS disconnects frequently',
    'SDS_RMCACHE_MEMORY_ALLOCATION_FAILED':
        'Memory allocation for RAM ReadCache failed on SDS',
    'STORAGE_POOL_HAS_CAPACITY_ERRORS': 'Storage Pool has capacity errors',
    'STORAGE_POOL_HAS_FAILED_CAPACITY': 'Storage Pool has failed capacity',
    'STORAGE_POOL_HAS_DEGRADED_CAPACITY':
        'Storage Pool has degraded capacity',
    'STORAGE_POOL_HAS_UNREACHABLE_CAPACITY':
        'Storage Pool has decreased capacity',
    'STORAGE_POOL_HAS_UNAVAILABLE_UNUSED_CAPACITY':
        'Storage Pool has unavailable-unused capacity',
    'STORAGE_POOL_UNBALANCED': 'Storage Pool is unbalanced',
    'CAPACITY_UTILIZATION_ABOVE_CRITICAL_THRESHOLD':
        'Capacity utilization above critical threshold',
    'CAPACITY_UTILIZATION_ABOVE_HIGH_THRESHOLD':
        'Capacity utilization above high threshold',
    'CONFIGURED_SPARE_CAPACITY_SMALLER_THAN_LARGEST_FAULT_UNIT':
        'Configured spare capacity is smaller than largest fault unit',
    'SPARE_CAPACITY_AND_FREE_CAPACITY_SMALLER_THAN_LARGEST_FAULT_UNIT':
        'Spare capacity and free capacity are smaller '
        'than the largest fault unit',
    'SPARE_CAPACITY_BELOW_THRESHOLD': 'Spare capacity is below threshold',
    'LICENSE_EXPIRED': 'License expired',
    'LICENSE_ABOUT_TO_EXPIRE': 'License will expire in %d days',
    'FWD_REBUILD_STUCK': 'Forward rebuild cannot proceed',
    'BKWD_REBUILD_STUCK': 'Backward rebuild cannot proceed',
    'REBALANCE_STUCK': 'Rebalance cannot proceed',
    'MDM_FAILS_OVER_FREQUENTLY': 'MDM fails over frequently',
    'FAILURE_RECOVERY_CAPACITY_BELOW_THRESHOLD':
        'Failure recovery capacity is below the threshold',
    'DEVICE_PENDING_ACTIVATION':
        'Device test is done and device is pending activation',
    'PD_INACTIVE': 'Inactive Protection Domain',
    'DRL_MODE_NON_VOLATILE': 'DRL mode: Hardened',
    'NOT_ENOUGH_FAULT_UNITS_IN_SP':
        'Storage Pool does not meet the minimum requirement of 3 fault units',
    'SDC_MAX_COUNT': 'No more SDCs can be defined on this system; '
                     'the maximum has been reached',
    'FIXED_READ_ERROR_COUNT_ABOVE_THRESHOLD': 'Device has fixed read errors',
    'SCANNER_COMPARE_ERROR':
        'Background device scanning has found data conflicts',
    'STORAGE_POOL_EXTREMELY_UNBALANCED':
        'The Storage Pool relies too heavily (over 50%) on capacity from a '
        'single SDS or Fault Set. Balance capacity over other SDSs or '
        'Fault Sets.'
}

================================================
FILE: delfin/drivers/dell_emc/scaleio/consts.py
================================================
# Copyright 2022 The SODA Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
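# --- Illustrative sketch, not part of the original module ---
# ScaleIO reports capacities in KiB ('maxCapacityInKb', 'sizeInKb', ...),
# while delfin resource models carry bytes, so the REST handler multiplies
# by oslo's binary kilo unit. A minimal standalone equivalent of that
# conversion (helper name hypothetical):
from oslo_utils import units


def kib_to_bytes(value_in_kib):
    # kib_to_bytes('1048576') == 1073741824, i.e. 1 GiB
    return int(value_in_kib) * units.Ki
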
from delfin.common import constants StorageVendor = 'DELL EMC' DEFAULT_TIMEOUT = 10 REST_AUTH_LOGIN = '/api/login' REST_AUTH_LOGOUT = '/api/logout' REST_SCALEIO_SYSTEM = '/api/types/System/instances' REST_SCALEIO_STORAGE_POOL = '/api/types/StoragePool/instances' REST_SCALEIO_VOLUMES = '/api/types/Volume/instances' REST_SCALEIO_DISKS = '/api/types/Device/instances' REST_SCALIO_HOSTS = '/api/types/Sdc/instances' REST_SCALIO_INITIIATORS = '/api/types/Sds/instances' REST_SCALEIO_ALERT = '/api/types/Alert/instances' DEFAULT_ALERTS_TIME_CONVERSION = 1000 DEFAULT_VOLUME_USERD_CAPACITY = 0 DATETIME_UTC_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' OID_SEVERITY = '1139.101.1.1' OID_EVENT_TYPE = '1139.101.1.2' OID_ERR_ID = '1139.101.1.3' OID_EVENT_ID = '1139.101.1.4' TRAP_ALERT_MAP = { '5': constants.Severity.CRITICAL, '2': constants.Severity.WARNING, } ================================================ FILE: delfin/drivers/dell_emc/scaleio/rest_handler.py ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import hashlib import six import json import requests import datetime import time from oslo_log import log from oslo_utils import units from delfin import exception from delfin import cryptor from delfin.common import alert_util from delfin.drivers.utils.rest_client import RestClient from delfin.drivers.dell_emc.scaleio import consts from delfin.drivers.dell_emc.scaleio import alert_consts from delfin.common import constants LOG = log.getLogger(__name__) class RestHandler(RestClient): def __init__(self, **kwargs): super().__init__(**kwargs) self.init_http_head() self.login() def login(self): try: res = self.get_rest_info(consts.REST_AUTH_LOGIN, 'login', 'GET') if res: self.rest_auth_token = res else: LOG.error("Login error. 
URL: %(url)s\n" "Reason: %(reason)s.", {"url": consts.REST_AUTH_LOGIN, "reason": res.text}) if 'User authentication failed' in res.text: raise exception.InvalidUsernameOrPassword() else: raise exception.StorageBackendException( six.text_type(res.text)) except Exception as e: LOG.error("Login error: %s", six.text_type(e)) raise exception.InvalidResults(e) def logout(self): try: if self.session: self.session.close() except Exception as e: err_msg = "Logout error: %s" % (six.text_type(e)) LOG.error(err_msg) raise exception.InvalidResults(e) def get_storage(self, storage_id): try: storage_json = self.get_rest_info(consts.REST_SCALEIO_SYSTEM) for system_json in (storage_json or []): system_id = system_json.get('id') system_links = json.loads(json.dumps( system_json.get('links'))) total_capacity = 0 used_capacity = 0 raw_capacity = 0 if not system_links: continue storage_disk_list = self.list_disks(storage_id) for storage_disk in storage_disk_list: raw_capacity += storage_disk.get('capacity') mdm_cluster = json.loads(json.dumps( system_json.get('mdmCluster'))) version_info = json.dumps( system_json.get('systemVersionName')) version_detail = version_info.split(' Version: ') version_id = version_detail[1].replace('\"', '') model = version_detail[0].replace('\"', '') cluster_state = mdm_cluster.get('clusterState') status = constants.StorageStatus.OFFLINE if 'Degraded' in cluster_state: status = constants.StorageStatus.DEGRADED elif 'Normal' in cluster_state: status = constants.StorageStatus.NORMAL for system_link in system_links: if 'Statistics' in system_link.get('href'): storage_detail = self.get_rest_info( system_link.get('href')) total_capacity = storage_detail. \ get('maxCapacityInKb') used_capacity = storage_detail. \ get('capacityInUseInKb') storage_map = { 'name': 'ScaleIO', 'vendor': consts.StorageVendor, 'model': model, 'status': status, 'serial_number': system_id, 'firmware_version': version_id, 'raw_capacity': raw_capacity, 'total_capacity': int(total_capacity) * units.Ki, 'used_capacity': int(used_capacity) * units.Ki, 'free_capacity': int(total_capacity - used_capacity) * units.Ki } return storage_map except exception.DelfinException as err: err_msg = "Get Storage System error: %s" % err.msg LOG.error(err_msg) raise err except Exception as e: LOG.error("Get Storage System error: %s", six.text_type(e)) raise exception.InvalidResults(e) def list_storage_pools(self, storage_id): storage_pool_list = [] try: storage_pool_json = self.get_rest_info( consts.REST_SCALEIO_STORAGE_POOL) for pool_json in (storage_pool_json or []): pool_name = pool_json.get('name') native_storage_pool_id = pool_json.get('id') pool_links = pool_json.get('links') used_capacity = 0 total_size = 0 for pool_link in pool_links: if 'Statistics' in pool_link.get('rel'): storage_pool_statics = self.get_rest_info( pool_link.get('href')) json.dumps(storage_pool_statics) used_capacity = storage_pool_statics.\ get('capacityInUseInKb') total_size = storage_pool_statics.\ get('maxCapacityInKb') pool_map = { 'name': pool_name, 'storage_id': storage_id, 'native_storage_pool_id': native_storage_pool_id, 'status': constants.StorageStatus.NORMAL, 'storage_type': constants.StorageType.BLOCK, 'total_capacity': int(total_size) * units.Ki, 'used_capacity': int(used_capacity) * units.Ki, 'free_capacity': int(total_size - used_capacity) * units.Ki } storage_pool_list.append(pool_map) return storage_pool_list except exception.DelfinException as err: err_msg = "Get Storage pool error: %s" % err.msg LOG.error(err_msg) raise err except 
Exception as e:
            LOG.error("Get Storage pool error: %s", six.text_type(e))
            raise exception.InvalidResults(e)

    def list_volumes(self, storage_id):
        list_volumes = []
        try:
            storage_volume_json = self.get_rest_info(
                consts.REST_SCALEIO_VOLUMES)
            for json_volume in (storage_volume_json or []):
                volume_name = json_volume.get('name')
                native_storage_pool_id = json_volume.get('storagePoolId')
                native_volume_id = json_volume.get('id')
                total_size = json_volume.get('sizeInKb')
                volume_type = constants.VolumeType.THIN
                if 'Thick' in json_volume.get('volumeType'):
                    volume_type = constants.VolumeType.THICK
                volume_map = {
                    'name': volume_name,
                    'storage_id': storage_id,
                    'description': volume_name,
                    'status': 'normal',
                    'native_volume_id': native_volume_id,
                    'native_storage_pool_id': native_storage_pool_id,
                    'wwn': native_volume_id,
                    'type': volume_type,
                    'total_capacity': int(total_size) * units.Ki,
                    'free_capacity': consts.DEFAULT_VOLUME_USERD_CAPACITY,
                    'used_capacity': consts.DEFAULT_VOLUME_USERD_CAPACITY,
                    'compressed': True,
                    'deduplicated': True
                }
                list_volumes.append(volume_map)
            return list_volumes
        except exception.DelfinException as err:
            err_msg = "Get Storage volume error: %s" % err.msg
            LOG.error(err_msg)
            raise err
        except Exception as e:
            LOG.error("Get Storage volume error: %s", six.text_type(e))
            raise exception.InvalidResults(e)

    def list_disks(self, storage_id):
        disks_list = []
        try:
            storage_disks_json = self.get_rest_info(consts.REST_SCALEIO_DISKS)
            for json_disk in (storage_disks_json or []):
                device_status = json_disk.get('deviceState')
                capacity = json_disk.get('maxCapacityInKb')
                status = constants.DiskStatus.NORMAL
                if device_status != 'Normal':
                    status = constants.DiskStatus.OFFLINE
                disk_map = {
                    'native_disk_id': json_disk.get('id'),
                    'name': json_disk.get('name'),
                    'status': status,
                    'storage_id': storage_id,
                    'native_disk_group_id': json_disk.get('sdsId'),
                    'serial_number': json_disk.get('id'),
                    'capacity': int(capacity) * units.Ki,
                    'health_score': status
                }
                disks_list.append(disk_map)
            return disks_list
        except exception.DelfinException as err:
            err_msg = "Get Storage disk error: %s" % err.msg
            LOG.error(err_msg)
            raise err
        except Exception as e:
            LOG.error("Get Storage disk error: %s", six.text_type(e))
            raise exception.InvalidResults(e)

    def list_alerts(self, query_para=None):
        alert_list = []
        try:
            storage_alert = self.get_rest_info(consts.REST_SCALEIO_ALERT)
            alert_description_map = alert_consts.ALERT_MAP
            for json_alert in (storage_alert or []):
                match_key = json_alert.get('id') + json_alert.get('name')
                occur_time = json_alert.get('startTime')
                datetime_obj = datetime.datetime.strptime(
                    occur_time, consts.DATETIME_UTC_FORMAT)
                alert_time = int(
                    time.mktime(datetime_obj.timetuple())
                    * consts.DEFAULT_ALERTS_TIME_CONVERSION
                    + datetime_obj.microsecond
                    / consts.DEFAULT_ALERTS_TIME_CONVERSION)
                alert_type_desc = json_alert.get('alertType')
                alert_type_desc = alert_type_desc.lower().replace('_', ' ')
                if not alert_util.is_alert_in_time_range(query_para,
                                                         alert_time):
                    continue
                alert_severity = json_alert.get('severity')
                if 'LOW' in alert_severity:
                    alert_severity = constants.Severity.MINOR
                elif 'MEDIUM' in alert_severity:
                    alert_severity = constants.Severity.CRITICAL
                elif 'HIGH' in alert_severity:
                    alert_severity = constants.Severity.FATAL
                alert_type = json_alert.get('alertType')
                alert_model = {
                    'alert_id': json_alert.get('id'),
                    'alert_name': alert_type + json_alert.get('name'),
                    'severity': alert_severity,
                    'category': constants.Category.FAULT,
                    'type': alert_type,
                    'sequence_number': json_alert.get('uuid'),
                    'description': alert_description_map.get(
json_alert.get('alertType'), alert_type_desc), 'occur_time': alert_time, 'match_key': hashlib.md5( match_key.encode()).hexdigest() } alert_list.append(alert_model) return alert_list except exception.DelfinException as err: err_msg = "Get Storage alerts error: %s" % err.msg LOG.error(err_msg) raise err except Exception as e: LOG.error("Get Storage alerts error: %s", six.text_type(e)) raise exception.InvalidResults(e) def list_storage_host_initiators(self, storage_id): initiators_list = [] try: storage_initiators = self.get_rest_info( consts.REST_SCALIO_INITIIATORS) list_host = self.list_storage_hosts(storage_id) for initiators_json in (storage_initiators or []): status = initiators_json.get('sdsState') initiators_id = initiators_json.get('id') initiators_type = constants.InitiatorType.UNKNOWN if 'iscsi' in initiators_json.get('perfProfile'): initiators_type = constants.InitiatorType.ISCSI if 'Normal' == status: status = constants.InitiatorStatus.ONLINE elif 'Disconnected' == status: status = constants.InitiatorStatus.OFFLINE ip_list = initiators_json.get('ipList') native_storage_host_id = None for ip_data in ip_list: sds_ip = ip_data.get('ip') for host_json in list_host: ip_address = host_json.get('ip_address') if sds_ip == ip_address: native_storage_host_id = \ host_json.get('native_storage_host_id') initiators_dict = { "name": initiators_json.get('name'), "storage_id": storage_id, "native_storage_host_initiator_id": initiators_id, "wwn": initiators_id, "type": initiators_type, "status": status, "native_storage_host_id": native_storage_host_id, } initiators_list.append(initiators_dict) return initiators_list except exception.DelfinException as err: err_msg = "Get Storage initiators error: %s" % err.msg LOG.error(err_msg) raise err except Exception as e: LOG.error("Get Storage initiators error: %s", six.text_type(e)) raise exception.InvalidResults(e) def list_storage_hosts(self, storage_id): host_list = [] try: storage_hosts = self.get_rest_info(consts.REST_SCALIO_HOSTS) for host_json in (storage_hosts or []): status = host_json.get('mdmConnectionState') if 'Connected' == status: status = constants.HostStatus.NORMAL elif 'Disconnected' == status: status = constants.HostStatus.OFFLINE ip_address = host_json.get('sdcIp') soft_version = host_json.get('softwareVersionInfo') host_dict = { "name": host_json.get('sdcGuid'), "description": ip_address + soft_version, "storage_id": storage_id, "native_storage_host_id": host_json.get('id'), "os_type": host_json.get('osType'), "status": status, "ip_address": ip_address } host_list.append(host_dict) return host_list except exception.DelfinException as err: err_msg = "Get Storage hosts error: %s" % err.msg LOG.error(err_msg) raise err except Exception as e: LOG.error("Get Storage hosts error: %s", six.text_type(e)) raise exception.InvalidResults(e) def list_masking_views(self, storage_id): list_masking_views_list = [] try: storage_view = self.get_rest_info(consts.REST_SCALEIO_VOLUMES) for map_json in (storage_view or []): view_name = map_json.get('name') volume_id = map_json.get('id') map_sdc_list = map_json.get('mappedSdcInfo') if map_sdc_list: for map_sdc in map_sdc_list: sdc_id = map_sdc.get('sdcId') view_map = { "name": view_name + sdc_id + volume_id, "description": view_name, "storage_id": storage_id, "native_masking_view_id": view_name + sdc_id + volume_id, 'native_volume_id': volume_id, 'native_storage_host_id': sdc_id } list_masking_views_list.append(view_map) return list_masking_views_list except exception.DelfinException as err: err_msg = "Get 
Storage Views Error: %s" % err.msg LOG.error(err_msg) raise err except Exception as e: LOG.error("Get Storage Views Error: %s", six.text_type(e)) raise exception.InvalidResults(e) @staticmethod def parse_alert(alert): alert_model = dict() try: alert_dict = alert.split(' ') for alert_json in alert_dict: alert_detail = alert_json.split('=')[1] if consts.OID_SEVERITY in alert_json: severity = consts.TRAP_ALERT_MAP.get( alert_detail, constants.Severity.INFORMATIONAL) alert_model['severity'] = severity elif consts.OID_EVENT_ID in alert_json: alert_model['alert_name'] = alert_detail.replace('\"', '') elif consts.OID_EVENT_TYPE in alert_json: alert_desc = alert_detail.split('.')[2].lower().replace( '_', ' ') alert_model['description'] = alert_desc alert_model['location'] = alert_desc elif consts.OID_ERR_ID in alert_json: alert_model['alert_id'] = str( alert_detail.replace('\"', '')) alert_model['category'] = constants.Category.FAULT alert_model['type'] = constants.EventType.EQUIPMENT_ALARM now = time.time() alert_model['occur_time'] = \ int(round(now * consts.DEFAULT_ALERTS_TIME_CONVERSION)) return alert_model except Exception as e: LOG.error(e) msg = "Failed to build alert model: %s." % (six.text_type(e)) raise exception.InvalidResults(msg) def get_rest_info(self, url, data=None, method='GET'): if 'login' == data: self.session.auth = requests.auth.HTTPBasicAuth( self.rest_username, cryptor.decode(self.rest_password)) else: self.login() self.session.auth = requests.auth.HTTPBasicAuth( self.rest_username, self.rest_auth_token) res = self.do_call(url, data, method) try: if res.status_code == 200: result_json = json.loads(res.text) elif res.status_code == 500: LOG.error('Connect Timeout error') raise exception.ConnectTimeout() elif res.status_code == 401: LOG.error('User authentication failed') raise exception.InvalidUsernameOrPassword else: raise exception.BadResponse() except Exception as err: LOG.exception('Get RestHandler.call failed: %(url)s.' ' Error: %(err)s', {'url': url, 'err': err}) raise exception.InvalidResults(err) return result_json ================================================ FILE: delfin/drivers/dell_emc/scaleio/scaleio_stor.py ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # # http:#www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
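# --- Illustrative usage sketch, not part of the original module ---
# A delfin driver is normally created by the task framework from stored
# AccessInfo. Assuming a reachable ScaleIO gateway and valid credentials
# (the kwargs layout and all values below are placeholders, not confirmed
# against delfin's AccessInfo schema), the driver defined in this file
# would be exercised roughly like this:
#
#     driver = ScaleioStorageDriver(
#         storage_id='storage-uuid',
#         rest={'host': '192.0.2.10', 'port': 443,
#               'username': 'admin', 'password': 'secret'})
#     storage = driver.get_storage(context=None)
#     pools = driver.list_storage_pools(context=None)
#     driver.rest_handler.logout()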
from oslo_log import log

from delfin.drivers import driver
from delfin.drivers.dell_emc.scaleio import rest_handler
from delfin.drivers.dell_emc.scaleio.rest_handler import RestHandler

LOG = log.getLogger(__name__)


class ScaleioStorageDriver(driver.StorageDriver):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.rest_handler = rest_handler.RestHandler(**kwargs)
        # Drop the session opened by RestHandler.__init__, apply the
        # caller's SSL 'verify' setting, then log in again with it.
        self.rest_handler.logout()
        self.rest_handler.verify = kwargs.get('verify', False)
        self.rest_handler.login()

    def reset_connection(self, context, **kwargs):
        self.rest_handler.logout()
        self.rest_handler.verify = kwargs.get('verify', False)
        return self.rest_handler.login()

    def get_storage(self, context):
        return self.rest_handler.get_storage(self.storage_id)

    def list_storage_pools(self, context):
        return self.rest_handler.list_storage_pools(self.storage_id)

    def list_volumes(self, context):
        return self.rest_handler.list_volumes(self.storage_id)

    def list_disks(self, context):
        return self.rest_handler.list_disks(self.storage_id)

    def list_alerts(self, context, query_para=None):
        return self.rest_handler.list_alerts(query_para)

    @staticmethod
    def parse_alert(context, alert):
        return RestHandler.parse_alert(alert)

    def add_trap_config(self, context, trap_config):
        pass

    def clear_alert(self, context, trap_config):
        pass

    def remove_trap_config(self, context, trap_config):
        pass

    def list_storage_host_initiators(self, context):
        return self.rest_handler.list_storage_host_initiators(self.storage_id)

    def list_storage_hosts(self, context):
        return self.rest_handler.list_storage_hosts(self.storage_id)

    def list_masking_views(self, context):
        return self.rest_handler.list_masking_views(self.storage_id)

    @staticmethod
    def get_access_url():
        return 'https://{ip}'

================================================
FILE: delfin/drivers/dell_emc/unity/__init__.py
================================================

================================================
FILE: delfin/drivers/dell_emc/unity/alert_handler.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
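# --- Illustrative sketch, not part of the original module ---
# AlertHandler.parse_alert() below maps the value carried in the trap's
# severity varbind (OID 1.3.6.1.6.3.1.1.4.1.0) to a delfin severity via
# TRAP_LEVEL_MAP, defaulting to Informational. A minimal standalone
# equivalent (function name hypothetical):
from delfin.common import constants


def resolve_trap_severity(trap_varbinds):
    # trap_varbinds: dict of OID string -> value, as delivered by the
    # SNMP trap receiver.
    value = trap_varbinds.get(AlertHandler.OID_SEVERITY)
    return AlertHandler.TRAP_LEVEL_MAP.get(
        value, constants.Severity.INFORMATIONAL)
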
import hashlib import time import six from oslo_log import log from delfin import exception, utils from delfin.common import alert_util from delfin.common import constants from delfin.drivers.dell_emc.unity import consts from delfin.i18n import _ LOG = log.getLogger(__name__) class AlertHandler(object): OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0' OID_NODE = '1.3.6.1.4.1.1139.103.1.18.1.1' OID_COMPONENT = '1.3.6.1.4.1.1139.103.1.18.1.2' OID_SYMPTOMID = '1.3.6.1.4.1.1139.103.1.18.1.3' OID_SYMPTOMTEXT = '1.3.6.1.4.1.1139.103.1.18.1.4' ALERT_LEVEL_MAP = {0: constants.Severity.CRITICAL, 1: constants.Severity.CRITICAL, 2: constants.Severity.CRITICAL, 3: constants.Severity.MAJOR, 4: constants.Severity.WARNING, 5: constants.Severity.FATAL, 6: constants.Severity.INFORMATIONAL, 7: constants.Severity.NOT_SPECIFIED } TRAP_LEVEL_MAP = {'1.3.6.1.4.1.1139.103.1.18.2.0': constants.Severity.CRITICAL, '1.3.6.1.4.1.1139.103.1.18.2.1': constants.Severity.CRITICAL, '1.3.6.1.4.1.1139.103.1.18.2.2': constants.Severity.CRITICAL, '1.3.6.1.4.1.1139.103.1.18.2.3': constants.Severity.MAJOR, '1.3.6.1.4.1.1139.103.1.18.2.4': constants.Severity.WARNING, '1.3.6.1.4.1.1139.103.1.18.2.5': constants.Severity.FATAL, '1.3.6.1.4.1.1139.103.1.18.2.6': constants.Severity.INFORMATIONAL, '1.3.6.1.4.1.1139.103.1.18.2.7': constants.Severity.NOT_SPECIFIED } SECONDS_TO_MS = 1000 SECONDS_PER_HOUR = 60 * 60 STATE_SOLVED = 2 TIME_PATTERN = "%Y-%m-%dT%H:%M:%S.%fZ" @staticmethod def parse_alert(context, alert): try: alert_model = dict() alert_model['alert_id'] = alert.get(AlertHandler.OID_SYMPTOMID) trap_map_desc = consts.TRAP_DESC.get( alert.get(AlertHandler.OID_SYMPTOMID)) if trap_map_desc: alert_desc = trap_map_desc[2] else: alert_desc = alert.get(AlertHandler.OID_SYMPTOMTEXT) alert_model['alert_name'] = alert.get(AlertHandler.OID_SYMPTOMTEXT) alert_model['severity'] = AlertHandler.TRAP_LEVEL_MAP.get( alert.get(AlertHandler.OID_SEVERITY), constants.Severity.INFORMATIONAL) alert_model['category'] = constants.Category.FAULT alert_model['type'] = constants.EventType.EQUIPMENT_ALARM occur_time = utils.utcnow_ms() alert_model['occur_time'] = occur_time alert_model['description'] = alert_desc alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE alert_model['location'] = alert.get(AlertHandler.OID_NODE) alert_model['match_key'] = hashlib.md5(alert.get( AlertHandler.OID_SYMPTOMTEXT).encode()).hexdigest() return alert_model except Exception as e: LOG.error(e) msg = (_("Failed to build alert model as some attributes missing")) raise exception.InvalidResults(msg) def parse_queried_alerts(self, alert_model_list, alert_dict, query_para): alerts = alert_dict.get('entries') for alert in alerts: try: content = alert.get('content', {}) if content.get('state') == AlertHandler.STATE_SOLVED: continue occur_time = int(time.mktime(time.strptime( content.get('timestamp'), self.TIME_PATTERN))) hour_offset = (time.mktime(time.localtime()) - time.mktime( time.gmtime())) / AlertHandler.SECONDS_PER_HOUR occur_time = occur_time + (int(hour_offset) * AlertHandler.SECONDS_PER_HOUR) if not alert_util.is_alert_in_time_range( query_para, int(occur_time * AlertHandler.SECONDS_TO_MS)): continue alert_model = {} location = '' resource_type = constants.DEFAULT_RESOURCE_TYPE if content.get('component'): location = content.get('component').get('id') alert_model['alert_id'] = content.get('messageId') alert_model['alert_name'] = content.get('message') alert_model['severity'] = self.ALERT_LEVEL_MAP.get( content.get('severity'), constants.Severity.INFORMATIONAL) 
alert_model['category'] = constants.Category.FAULT alert_model['type'] = constants.EventType.EQUIPMENT_ALARM alert_model['sequence_number'] = content.get('id') alert_model['occur_time'] = int(occur_time * AlertHandler.SECONDS_TO_MS) alert_model['description'] = content.get('description') alert_model['resource_type'] = resource_type alert_model['location'] = location alert_model['match_key'] = hashlib.md5( content.get('message').encode()).hexdigest() if alert_model['severity'] == 'Informational': continue alert_model_list.append(alert_model) except Exception as e: LOG.error(e) err_msg = "Failed to build alert model as some attributes " \ "missing in queried alerts: %s" % (six.text_type(e)) raise exception.InvalidResults(err_msg) ================================================ FILE: delfin/drivers/dell_emc/unity/consts.py ================================================ # Copyright 2021 The SODA Authors. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. DEFAULT_TIMEOUT = 10 ALERT_TIMEOUT = 20 REST_RETRY_TIMES = 1 TRAP_DESC = { "1:127486a": ["WARNING", "ALRT_LCC_FW_UPGRADE_FAILED", "The link control card (LCC) will continue to function with " "older versions of the software. The next time the affected " "Storage Processor (SP) reboots, the firmware will attempt " "the upgrade again."], "1:127486b": ["WARNING", "ALRT_LCC_FW_UPGRADE_FAILED", "The link control card (LCC) will continue to function with " "older versions of the software. The next time the affected " "Storage Processor (SP) reboots, the firmware will attempt " "the upgrade again."], "1:1278982": ["ERROR", "ALRT_DAE_INVALID_DRIVE", "There is an invalid disk in the Disk Array Enclosure (DAE)." " Replace the disk with the correct type."], "1:12dc501": ["CRITICAL", "ALRT_SYS_POOL_OFFLINE", "An internal system service is offline. Some system " "capabilities may not be available. Contact your service " "provider"], "1:12dcd00": ["CRITICAL", "ALRT_SYS_LUN_OFFLINE", "An internal system service required for metrics is offline." "System metrics are not available. Contact your service " "provider."], "1:1670071": ["INFO", "ALRT_PBL_ENV_CLEARED", "The environmental interface failure has been resolved. " "No action is required."], "1:1678007": ["ERROR", "ALRT_PBL_ESP_ERROR_FUP_FAILED", "Firmware upgrade has failed. Contact " "your service provider."], "1:167800e": ["ERROR", "ALRT_PBL_ESP_ERROR_RESUME_PROM_READ_FAILED", "The Resume Prom Read operation has failed. Contact " "your service provider"], "1:167803d": ["CRITICAL", "ALRT_SSD_PECYCLE_EXPIRE", "%2 is predicted to exceed drive specified write endurance " "in %3 days. It is recommended to replace the drive"], "1:1678049": ["ERROR", "ALRT_PBL_ENV_FAILURE", "An environmental interface failure has been detected. Gath" "er diagnostic materials and contact your service " "provider."], "1:1678052": ["ERROR", "ALRT_PBL_ESP_ERROR_LCC_COMPONENT_FAULTED", "Link Control Card (LCC) has faulted. This failure may be " "caused by a component other than the LCC. Replace the " "faulted disks first. 
If the problem persists, contact " "your service provider."], "1:167805f": ["ERROR", "ALRT_DRIVE_AFA_FAILED", "One of the system drives failed All-Flash check. Replace " "the non-Flash drive with a Flash drive."], "1:167c00a": ["ERROR", "ALRT_PBL_ESP_PEERSP_POST_FAIL", "The Storage Processor (SP) has faulted. Contact your " "service provider."], "1:1688028": ["ERROR", "ALRT_DISK_USER_DISK_IN_SYSTEM_SLOT", "A bind user disk has been inserted in a system disk slot. " "Remove the disk and insert it in a user drive slot."], "1:1688029": ["ERROR", "ALRT_DISK_SYSTEM_DISK_IN_USER_SLOT", "A system disk has been inserted in a wrong slot. Remove the" " disk and insert it in a system disk slot."], "1:16d80c4": ["INFO", "ALRT_SNAPSHOT_INVALIDATION", "Snapshots have been automatically marked for deletion due " "to insufficient pool space."], "1:16f0077": ["INFO", "ALRT_MLU_UDI_RDWT_RESTORE", "The file system is now read-write, because free space in " "its pool has increased to more than 12.5 GB."], "1:16f0078": ["INFO", "ALRT_MLU_UDI_SNAPS_OK", "The file system is no longer at risk of losing its " "snapshots, because there is now enough free space in its " "associated pool."], "1:16f0079": ["INFO", "ALRT_MLU_UDI_ABOUT_NOT_NEEDING_FULL_SYNC", "The file system no longer needs a full synchronization for " "the associated replication session, because there is enough" " free space in the associated pool."], "1:16f4007": ["WARNING", "ALRT_FILESYSTEM_REACHED_CIFS_SHARE_COUNT_THRESHOLD", "Creation of the SMB share has exceeded the 90% threshold " "for the underlying file system or snapshot. Remove " "unnecessary SMB shares from the file system or snapshot."], "1:16f4008": ["WARNING", "ALRT_FILESYSTEM_REACHED_NFS_SHARE_COUNT_THRESHOLD", "Creation of the NFS share has exceeded the 90% threshold " "for the underlying file system or snapshot. Remove " "unnecessary NFS shares from the file system or snapshot."], "1:16f4009": ["WARNING", "ALRT_MLU_UDI_ABOUT_SNAP_INVALIDATION", "Pool containing the file system is low on free space, and " "the file system will lose all of its snapshots. To retain " "the snapshots, add more space to the pool, free up space " "from the pool, or use the CLI to change the file system's " "pool full policy to failWrites."], "1:16f400a": ["WARNING", "ALRT_MLU_UDI_ABOUT_NEEDING_FULL_SYNC", "Pool containing the file system is low on free space, so " "the associated replication session will need a full " "synchronization. To resolve this issue, add more space to " "the pool, free up space from the pool, or use the CLI to " "change the file system's pool full policy to failWrites."], "1:16f400b": ["WARNING", "ALRT_POOL_SPACE_LOW", "The pool space is low and the associated file system is " "configured with a Fail Writes pool full policy. When the " "pool reaches full capacity, any write operations to this " "file system may fail. Change the pool full policy for the " "file system using the CLI or add more space to the pool."], "1:16f8319": ["ERROR", "ALRT_MLU_UDI_RD_ONLY_SNAP_INVALIDATION", "The file system is now read-only, because the pool's free " "space dropped below 4 GB, and its poolFullPolicy is set to " "failWrites. To make the file system read-write with this " "policy, add space to the pool or free up space until there " "is at least 12.5 GB of free space. Alternatively, use the " "CLI to change the file system's pool full policy to " "deleteAllSnaps."], "1:16fc000": ["ERROR", "ALRT_SYS_VDM_OFFLINE", "An internal system service is offline. Some system " "capabilities may not be available. Contact your service " "provider."], "1:1744002": ["WARNING", "ALRT_VVNX_VDISK_EXCEED_MAX_COUNT", "The maximum number of virtual disks is exceeded. One or " "more virtual disks will not be available unless you remove" " some existing virtual disks. Check the system " "log for details."], "1:174c001": ["CRITICAL", "ALRT_LXF_UNSUPPORTED_SCSI_CONTROLLER", "An unsupported virtual SCSI controller was added to the " "system. You should remove this controller, because it can " "cause problems on the next reboot."], "1:1760114": ["INFO", "ALRT_CBE_KEYSTORE_BACKUP_REQUIRED", "The Data at Rest Encryption keystore has been modified due " "to configuration changes on the array. It is very important" " to retrieve and save a copy of the keystore in order to " "secure your data on the array."], "1:1768001": ["ERROR", "ALRT_KMIP_SERVER_UNAVAILABLE", "A configured KMIP Server is either unavailable " "or misconfigured."], "1:1768002": ["ERROR", "ALRT_KMIP_SERVER_NO_ENCRYPTION_KEY", "A configured KMIP Server does not have the encryption " "key for this array."], "10:10000": ["WARNING", "ALRT_FILESYSTEM_REACHED_CIFS_SHARE_MAX_COUNT", "A file system has reached a limit of maximum allowed number " "of SMB shares."], "10:10001": ["WARNING", "ALRT_FILESYSTEM_REACHED_NFS_SHARE_MAX_COUNT", "A file system has reached a limit of maximum allowed number" " of NFS shares."], "12:104e0017": ["CRITICAL", "ALRT_LDAP_NO_CONNECT", "The system could not connect to the LDAP server. This " "impacts your ability to log into the system but does not" " impact data access."], "12:104f0003": ["ERROR", "ALRT_TIME_NOT_SYNCED", "There is a significant difference between the clock time" " of the Storage Processor (SP) and the Windows domain " "controller. To resolve time synchronization problems, " "you can set up a network time protocol (NTP) server or " "contact your Windows domain administrator."], "12:1074002f": ["CRITICAL", "ALRT_MS_DC_NO_CONNECT", "The system could not connect to the Microsoft Windows " "Domain Controller."], "12:10760024": ["CRITICAL", "ALRT_DNS_FAIL_PING", "The DNS server is not available on the network and the " "NX3e system could not connect."], "12:10760025": ["CRITICAL", "ALRT_DNS_INVALID_CONFIG", "The system cannot connect to the DNS server. The DNS " "server may be configured incorrectly."], "13:102b0001": ["ERROR", "ALRT_DUPLICATE_ADDRESS_FOUND", "A duplicate address was detected on the network. The " "address being configured cannot be used, because it is " "being used by another node."], "13:102b0002": ["ERROR", "ALRT_DUPLICATE_ADDRESS_FOUND", "A duplicate address was detected on the network. The " "address being configured cannot be used, because it is " "being used by another node."], "13:10360005": ["WARNING", "ALRT_NAS_CA_CERT_EXPIRES_TODAY", "The CA certificate installed on the NAS server will " "expire today. This certificate is required to keep " "SSL-enabled services (such as LDAP with enabled SSL " "security and CA certificate validation) functioning. " "Upon certificate expiration, users may lose access to " "shares on the NAS server, especially when multiprotocol " "sharing is enabled. Contact the system administrator to " "renew the CA certificate, and then upload it to the " "NAS server."], "13:10360007": ["WARNING", "ALRT_NAS_CA_CERT_EXPIRES_IN_ONE_WEEK", "The CA certificate installed on the NAS server will " "expire in one week. This certificate is required to keep " "SSL-enabled services (such as LDAP with enabled SSL " "security and CA certificate validation) functioning. " "Once it expires, users may lose access to shares on the" " NAS server, especially when multiprotocol sharing is " "enabled. Contact the system administrator to renew the " "CA certificate, and then upload it to the NAS server."], "13:10360008": ["ERROR", "ALRT_NAS_CA_CERT_HAS_EXPIRED", "The CA certificate installed on the NAS server has " "expired. Services that use this certificate to " "validate remote hosts (such as LDAP with enabled SSL " "security and CA certificate validation) will not " "function properly, and corresponding SSL connections " "will be rejected. Users may lose access to shares on " "the NAS server, especially when multiprotocol sharing " "is enabled. Contact the system administrator to renew " "the CA certificate, and then upload it to the " "NAS server."], "13:10360009": ["INFO", "ALRT_NAS_CA_CERT_EXPIRES_IN_30_DAYS", "The CA certificate installed on the NAS server will " "expire in 30 days. This certificate is required to " "keep SSL-enabled services (such as LDAP with enabled " "SSL security and CA certificate validation) functioning." " Upon certificate expiration, users may lose access to " "shares on the NAS server, especially when multiprotocol" " sharing is enabled. Contact the system administrator to" " renew the CA certificate, and then upload it to the " "NAS server."], "13:1040003c": ["WARNING", "ALRT_BLOCK_USER_SOFTQUOTA", "You have used too much space in the specified file system" " and should delete unwanted files and directories from it" ". Alternatively, the administrator can increase your soft" " quota limit for the file system."], "13:1040003d": ["ERROR", "ALRT_BLOCK_USER_SOFTQUOTA_EXPIRED", "You have used too much space in the specified file system" " and will no longer be able to write to the file system" " unless you delete unwanted files and directories from it" ". Alternatively, the administrator can increase your soft" " quota limit for the file system."], "13:1040003e": ["ERROR", "ALRT_BLOCK_USER_HARDQUOTA", "You have used too much space in the specified file system" " and will no longer be able to write to it unless you" " delete unwanted files and directories to reduce the" " percentage of used space. Alternatively, the " "administrator can increase your hard quota limit for" " the file system."], "13:1040003f": ["WARNING", "ALRT_BLOCK_USER_SOFTQUOTA_CROSSEDWITHINTREE", "You have used too much space in the specified quota tree" " and should delete unwanted files and directories from" " the tree. Alternatively, the administrator can increase" " your soft quota limit for the quota tree."], "13:10400040": ["ERROR", "ALRT_BLOCK_USER_SOFTQUOTACROSSED_GRACEEXPIREDWITHINTREE", "You have used too much space in the specified quota tree" " and will no longer be able to write to it unless you" " delete unwanted files and directories to reduce the " "percentage of used space. Alternatively, the " "administrator can increase your soft quota limit for" " that quota tree."], "13:10400041": ["ERROR", "ALRT_BLOCK_USER_HARDQUOTAEXCEEDEDWITHINTREE", "You have used too much space in the specified quota tree" " and will no longer be able to write to it unless you" " delete unwanted files and directories to reduce the" " percentage of used space.
Alternatively, the " "administrator can increase your hard quota limit for " "the quota tree."], "13:10400042": ["WARNING", "ALRT_BLOCK_TREESOFTQUOTACROSSED", "Too much space has been consumed on the specified quota" " tree. You should delete unwanted files and directories" " from the quota tree. Alternatively, the administrator" " can increase the soft quota limit for the quota tree."], "13:10400043": ["ERROR", "ALRT_BLOCK_TREESOFTQUOTACROSSED_GRACEEXPIRED", "Too much space has been consumed on the specified quota" " tree. Users will no longer be able to write to the quota" " tree unless they delete unwanted files and directories " "from it. Alternatively, the administrator can increase " "the soft quota limit for the quota tree."], "13:10400044": ["ERROR", "ALRT_BLOCK_TREEHARDQUOTAEXCEEDED", "Too much space has been consumed on the specified quota" " tree. Users will no longer be able to write to the quota" " tree unless they delete unwanted files and directories" " from it. Alternatively, the administrator can increase" " the hard quota limit for the quota tree."], "13:10400045": ["WARNING", "ALRT_BLOCK_TREESOFTQUOTA_AGGREGATION", "Too much space has been consumed on the specified quota" " tree. You should delete unwanted files and directories" " from the quota tree. Alternatively, the administrator" " can increase the soft quota limit for the quota tree."], "13:10400046": ["ERROR", "ALRT_BLOCK_TREEHARDQUOTA_AGGREGATION", "Too much space has been consumed on the specified quota" " tree. Users will no longer be able to write to the" " quota tree unless they delete unwanted files and " "directories from it. Alternatively, the administrator " "can increase the hard quota limit for the quota tree."], "13:10400047": ["WARNING", "ALRT_BLOCK_USERSOFTQUOTA_AGGREGATION", "You have used too much space in the specified file" " system and should delete unwanted files and directories" " from it. Alternatively, the administrator can increase" " your soft quota limit for the file system."], "13:10400048": ["ERROR", "ALRT_BLOCK_USERHARDQUOTA_AGGREGATION", "You have used too much space in the specified file system" " and will no longer be able to write to the file system" " unless you delete unwanted files and directories from it" ". Alternatively, the administrator can increase your " "quota limits for the file system."], "13:10400049": ["WARNING", "ALRT_BLOCK_USERSOFTQUOTAWITHINTREE_AGGREGATION", "You have used too much space in the specified quota tree" " and should delete unwanted files and directories from it" ". Alternatively, the administrator can increase your soft" " quota limit for the quota tree."], "13:1040004a": ["ERROR", "ALRT_BLOCK_USERHARDQUOTAWITHINTREE_AGGREGATION", "You have used too much space in the specified quota tree" " and will no longer be able to write to the quota tree" " unless you delete unwanted files and directories from it" ". Alternatively, the administrator can increase your" " quota limits for the quota tree."], "13:10490005": ["ERROR", "ALRT_NAS_NIS_UNREACHABLE", "The Network Information Service (NIS) configured for the" " NAS server was unable to provide user mapping " "information and is not responding. Check the availability" " of the NIS server, and ensure that the domain name and " "addresses used for the server are accurate."], "13:104e0005": ["ERROR", "ALRT_NAS_LDAP_ALL_UNREACHABLE", "The LDAP service configured for the NAS server was unable" " to provide user mapping information and is no longer " "responding. 
At least one configured LDAP server needs to " "be operational. Check the availability of the LDAP " "servers, and look for connectivity issues."], "13:104e0007": ["WARNING", "ALRT_NAS_LDAP_BAD_CONFIGURATION", "The LDAP client settings on the NAS server are not " "configured correctly for the domain. You may encounter" " unexpected issues or mapping errors when using LDAP as a" " Unix directory service. Verify account settings. Check " "the binding and access permissions for the " "configured LDAP servers."], "13:104f0001": ["ERROR", "ALRT_NAS_CIFSSERVER_TIMENOTSYNC", "The current system time is not synchronized with the " "Active Directory controller of the domain. Check the " "system NTP (Network Time Protocol) settings to ensure that" " your system's time is synchronized with the time of " "the Active Directory controller."], "13:10510004": ["CRITICAL", "ALRT_VIRUS_CHECKER_NO_CONNECT", "The system could not connect to your virus checker" " server."], "13:10510005": ["ERROR", "ALRT_VC_ERROR_STOPCIFS", "No virus checker server is available. SMB has stopped and" " cannot resume until a virus checker server becomes " "available. Check the status of the network and the virus" " checker servers."], "13:10510006": ["ERROR", "ALRT_VC_ERROR_STOPVC", "The virus checker server is not available. Virus checking" " is paused and cannot resume until a virus checker server" " becomes available. Check the status of the network and" " the virus checker servers."], "13:1051000b": ["CRITICAL", "ALRT_VIRUS_SCAN_CMPLTE", "The antivirus scan has completed successfully."], "13:1051000c": ["CRITICAL", "ALRT_VIRUS_SCAN_FAIL", "Antivirus scanning has aborted."], "13:1051000d": ["CRITICAL", "ALRT_VC_FILE_DELETE", "An infected file was detected and deleted by your " "antivirus application."], "13:1051000e": ["CRITICAL", "ALRT_VC_FILE_RENAMED", "An infected file was detected and renamed by your" " antivirus application."], "13:1051000f": ["CRITICAL", "ALRT_VC_FILE_MOD", "An infected file was detected and modified by your " "antivirus application."], "13:1051001e": ["ERROR", "ALRT_VC_ERROR_SERVER_OFFLINE_MSRPC", "The system could not connect to your virus checker server" ". Check the status of the network and the virus checker" " server."], "13:10510021": ["ERROR", "ALRT_VC_ERROR_SERVER_OFFLINE_MSRPC_WIN", "The system could not connect to your virus checker server" ". Check the status of the network and the virus checker" " server."], "13:10510022": ["ERROR", "ALRT_VC_ERROR_SERVER_OFFLINE_HTTP", "The system could not connect to the virus checker server." " Check the status of the network and the virus checker" " server."], "13:10600002": ["WARNING", "DHSM_CONNECTION_DOWN", "A Distributed Hierarchical Storage Management (DHSM) " "connection to a secondary storage is down. Make sure that" ": 1) The secondary storage is up and running on the " "correct port. 2) The DHSM settings (URL, remote port, " "credentials) are correct."], "13:10600003": ["INFO", "DHSM_CONNECTION_RESUMED", "A Distributed Hierarchical Storage Management (DHSM) " "connection to a secondary storage has resumed. It is now" " operational."], "13:106c004b": ["ERROR", "ALRT_REP_FAILED_FOR_ATTACHED_SNAPSHOT", "The system cannot replicate an attached snapshot. Detach" " the snapshot. When the detach operation completes, try" " to replicate the snapshot again."], "13:106c004c": ["ERROR", "ALRT_REP_FAILED_FOR_SNAPSHOT_WITH_SHARES_OR_EXPORTS", "The system cannot replicate a snapshot that has shares or" " exports. Delete the shares and exports, and try to " "replicate the snapshot again."], "13:10760001": ["CRITICAL", "ALRT_DNS_NO_CONNECT", "The system could not connect to the DNS server. This may" " be the result of the DNS settings being incorrect."], "13:1092000f": ["NOTICE", "CEPP_STARTED", "The events publishing service is running on the specified" " NAS server."], "13:10920010": ["NOTICE", "CEPP_STOPPED", "The events publishing service is no longer running on the" " specified NAS server. Events are no longer being sent to" " the CEPA servers."], "13:10920011": ["INFO", "CEPP_SERVER_ONLINE", "The specified CEPA server is operational."], "13:10920012": ["ERROR", "CEPP_SERVER_OFFLINE0", "The specified CEPA server is not operational. Verify: 1)" " Network availability and the CEPA facility is running on" " the CEPA server. 2) That a pool has at least one event " "assigned. 3) That the Events Publishing service is " "running. 4) Network integrity between the SMB server " "and the CEPA server."], "13:10920013": ["ERROR", "CEPP_SERVER_OFFLINENT", "The specified CEPA server is not operational. Verify: 1)" " Network availability and the CEPA facility is running on" " the CEPA server. 2) That a pool has at least one event " "assigned. 3) That the Events Publishing service is " "running. 4) Network integrity between the SMB server and" " the CEPA server."], "13:10920014": ["ERROR", "CEPP_SERVER_OFFLINEHTTP", "The specified CEPA server is not operational. Verify: 1)" " Network availability and the CEPA facility is running on" " the CEPA server. 2) That a pool has at least one event " "assigned. 3) That the Events Publishing service is " "running. 4) Network integrity between the SMB server and " "the CEPA server."], "13:10920015": ["ERROR", "CEPP_CIFS_SUSPENDED", "The SMB service was suspended by the events publishing " "service. The specified pool does not contain at least one" " online CEPA server, and an events policy is in effect. " "Make sure at least one CEPA server is online for this " "pool, or set the events policy to 'Ignore'."], "13:10920016": ["NOTICE", "CEPP_CIFS_RESUME", "The SMB service is no longer suspended by the events " "publishing service. There is either at least one online " "CEPA server in the pool, or the events policy was set to " "'Ignore'."], "13:10940002": ["WARNING", "ALRT_DEDUP_NO_SPACE", "There is insufficient space available to complete " "deduplication. You need to allocate additional space."], "13:10940066": ["WARNING", "ALRT_DEDUP_NO_PROT_SPACE", "There is insufficient space available to complete " "deduplication. You need to allocate additional " "protection space."], "13:10940068": ["INFO", "ALRT_DEDUP_FS_FAILED", "The deduplication process on the specified file system " "failed. This may have occurred because of insufficient " "disk space or other system resource issues. Check any " "related alerts and fix the underlying problems. If the " "problem persists, contact your service provider."], "13:10ad0001": ["WARNING", "ALRT_NO_DEFAULT_UNIX_ACCOUNT", "A Windows user was unable to access a multiprotocol file" " system that has a Unix access policy. Create a valid " "default Unix user for the associated NAS server, or map " "the Windows user to a valid Unix user."], "13:10ad0002": ["WARNING", "ALRT_NO_DEFAULT_WIN_ACCOUNT", "A Unix user was unable to access a multiprotocol file " "system that has a Windows access policy. Create a valid " "default Windows user for the associated NAS server, or " "map the Unix user to a valid Windows user."], "13:10ad0003": ["WARNING", "ALRT_INVALID_DEFAULT_WINDOWS_ACCOUNT", "A Unix user mapped to a default Windows user was unable " "to access a multiprotocol file system with a Windows " "access policy."], "13:10ad0004": ["WARNING", "ALRT_INVALID_DEFAULT_UNIX_ACCOUNT", "A Windows user was unable to access a multiprotocol file " "system because the default Unix user for the associated " "NAS server is invalid. Change the default Unix user to a " "valid user from the Unix directory service, or map the " "Windows user to a valid Unix user."], "13:10ad0005": ["ERROR", "ALRT_NAS_UNIX_USER_MAPPING_ERR", "User mapping failed. The Unix username cannot be mapped " "to a Windows username. Specify a valid Windows username " "to allow the Unix users to access the Windows-based " "file systems."], "13:10ad0007": ["ERROR", "ALRT_NAS_WIN_USER_MAPPING_ERR", "An SMB session cannot be established because the Windows " "username in the domain cannot be mapped to a Unix " "username. Check the Unix Directory Service settings, " "and optionally specify a default Unix username for the " "NAS server."], "14:100001": ["INFO", "DESC_TEST_SNMP_ALERT", "This is a test message to be sent in an SNMP trap."], "14:110001": ["INFO", "DESC_TEST_EMAIL_ALERT", "This is a test email alert message."], "14:160074": ["WARNING", "ALRT_AUTO_REMOVE_FILE_INTERFACE", "The system automatically removed the overridden " "file interface associated with a replication destination " "NAS server, because the corresponding file interface was " "removed on the source NAS server."], "14:160092": ["WARNING", "ALRT_AUTO_DISABLE_DNS_CLIENT", "The system automatically disabled an overridden DNS client " "of a replication destination NAS server, because the " "corresponding DNS client was disabled on the source " "NAS server."], "14:16009c": ["WARNING", "ALRT_AUTO_DISABLE_NIS_CLIENT", "The system automatically disabled the overridden NIS client" " of a replication destination NAS server, because the " "corresponding NIS client was disabled on the source " "NAS server."], "14:1600c4": ["WARNING", "ALRT_AUTO_DISABLE_LDAP_CLIENT", "The system automatically disabled an overridden LDAP client" " of a replication destination NAS server, because the " "corresponding LDAP client was disabled on the source NAS " "server."], "14:170001": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire soon" ". Obtain and install the license files to ensure continued " "access to the relevant feature."], "14:170002": ["CRITICAL", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire soon" ". Obtain and install the license files to ensure continued" " access to the relevant feature."], "14:170003": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170004": ["CRITICAL", "ALRT_ANTI_VIRUS_LICENSE_EXPIRED", "The Antivirus Server Integration license has expired, and " "the storage system no longer has antivirus protection. " "Obtain and install a new license file to ensure access to " "antivirus protection."], "14:170005": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon.
Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170006": ["CRITICAL", "ALRT_LICENSE_EXPIRED", "The EMC Unity Operating Environment V4.0 license has " "expired, and your access to Unity functionality has been " "disabled. Obtain and install the license file to ensure " "continued access to Unity functionality."], "14:170007": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170008": ["CRITICAL", "ALRT_CIFS_LICENSE_EXPIRED", "The CIFS/SMB Support license has expired, and the storage " "system no longer has support for the CIFS/SMB protocol. " "Obtain and install a new license file to ensure support " "for CIFS/SMB."], "14:170009": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:17000a": ["CRITICAL", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:17000b": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:17000c": ["CRITICAL", "ALRT_EMCSUPPORT_LICENSE_EXPIRED", "The EMC Support license has expired, and the storage " "system's access to EMC support has been disabled. Obtain " "and install a new license file to ensure access to EMC " "support."], "14:17000d": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:17000e": ["CRITICAL", "ALRT_ESA_LICENSE_EXPIRED", "The EMC Storage Analytics (ESA) license has expired, and " "the storage system no longer has access to ESA. Obtain and" " install a new license file to ensure access to ESA."], "14:17000f": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170010": ["CRITICAL", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170011": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170012": ["CRITICAL", "ALRT_FASTVP_LICENSE_EXPIRED", "The FAST VP license has expired, and the storage system no " "longer has support for FAST VP. Obtain and install a new " "license file to ensure support for FAST VP."], "14:170013": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170014": ["CRITICAL", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170015": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. 
Obtain and install the license " "continued access to the relevant feature."], "14:170016": ["CRITICAL", "ALRT_ISCSI_LICENSE_WILL_EXPIRE", "The Internet Small Computer System Interface (iSCSI) " "license has expired, and the storage system no longer has" " support for iSCSI. Obtain and install a new license file" " to ensure iSCSI support."], "14:170017": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170018": ["CRITICAL", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170019": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:17001a": ["CRITICAL", "ALRT_LOCAL_COPIES_LICENSE_EXPIRED", "The Local Copies license has expired, and the storage " "system no longer has support for local copies (including " "the ability to create snapshots). Obtain and install a new " "license file to ensure support for local copies."], "14:17001b": ["WARNING", "ALRT_CIFS_LICENSE_EXPIRING", "The NFS license will expire soon, and the storage system's" " support for NFS will be disabled. Obtain and install a new" " license file to ensure continued support for NFS."], "14:17001c": ["CRITICAL", "ALRT_NFS_LICENSE_EXPIRED", "The NFS license has expired, and the storage system no " "longer has support for the NFS protocol. Obtain and install" " a new license file to ensure support for NFS."], "14:17001d": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:17001e": ["CRITICAL", "ALRT_QOS_LICENSE_EXPIRED", "The Quality of Service (QOS) license has expired, and the " "storage system no longer has support for the QOS feature." " Obtain and install a new license file to ensure support " "for the QOS feature."], "14:17001f": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170020": ["CRITICAL", "ALRT_REPLICATION_LICENSE_EXPIRED", "The Replication license has expired, and the storage system" " no longer has support for replication. Obtain and install" " a new license file to ensure support for replication."], "14:170021": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170022": ["CRITICAL", "ALRT_SCE_LICENSE_EXPIRED", "The Storage Capacity Expansion license has expired, and " "your ability to manage extended storage capacity has been" " disabled. Obtain and install a new license file to ensure" " access to extended storage capacity."], "14:170023": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170024": ["CRITICAL", "ALRT_THIN_PROVISIONING_LICENSE_EXPIRED", "The Thin Provisioning license has expired, and the storage" " system no longer has support for thin provisioning.
" "Obtain and install the license file to ensure support for" " thin provisioning."], "14:170025": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170026": ["CRITICAL", "ALRT_UNISPHERE_LICENSE_EXPIRED", "The Unisphere license has expired, and the storage system's" " access to Unisphere functionality has been disabled. " "Obtain and install a new license file to ensure access to" " Unisphere functionality."], "14:170027": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170028": ["CRITICAL", "ALRT_UC_LICENSE_EXPIRED", "The Unisphere Central license has expired, and the storage" " system's support for Unisphere Central has been disabled." " Obtain and install a new license file to ensure support" " for Unisphere Central."], "14:170029": ["WARNING", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:17002a": ["CRITICAL", "ALRT_VMWARE_LICENSE_EXPIRED", "The VMware VASA/VVols license has expired, and the storage " "system no longer has support for VVols. Obtain and install" " a new license file to ensure support for VVols."], "14:17002b": ["CRITICAL", "ALRT_LICENSE_EXPIRING", "One of your system licenses has expired or will expire " "soon. Obtain and install the license files to ensure " "continued access to the relevant feature."], "14:170032": ["WARNING", "ALRT_INLINE_COMPRESSION_LICENSE_WILL_EXPIRE", "The inline compression license will expire soon. Obtain and" " install a new license file to ensure continued support " "for inline compression."], "14:170033": ["CRITICAL", "ALRT_INLINE_COMPRESSION_LICENSE_EXPIRED", "The Inline Compression license has expired, and the storage" " system no longer has support for inline compression. " "Obtain and install a new license file to ensure support for" " inline compression."], "14:170034": ["NOTICE", "ALRT_MAX_CAPACITY_LIMIT_INCREASE", "The maximum storage capacity limit has been increased."], "14:170051": ["CRITICAL", "ALRT_ANTI_VIRUS_LICENSE_WILL_EXPIRE", "The Antivirus Server Integration license has expired, and " "the storage system's access to antivirus protection will be" " disabled soon. Obtain and install a new license file to " "ensure continued access to antivirus protection."], "14:170052": ["CRITICAL", "ALRT_LICENSE_WILL_EXPIRE", "The EMC Unity Operating Environment, V4.0 license has " "expired, and your access to Unity functionality will be " "disabled soon. Obtain and install a new license file to " "ensure continued access to Unity functionality."], "14:170053": ["CRITICAL", "ALRT_CIFS_SMB_LICENSE_WILL_EXPIRE", "The CIFS/SMB Support license has expired, and the storage" " system's support for the CIFS/SMB protocol will be " "disabled soon. Obtain and install a new license file to " "ensure continued support for CIFS/SMB."], "14:170055": ["CRITICAL", "ALRT_EMCSUPPORT_LICENSE_WILL_EXPIRE", "The EMC Support license has expired, and the storage " "system's access to EMC support will be disabled soon. 
" "Obtain and install a new license file to ensure continued" " access to EMC support."], "14:170056": ["CRITICAL", "ALRT_ESA_LICENSE_WILL_EXPIRE", "The EMC Storage Analytics (ESA) license has expired, and " "the storage system's access to ESA will be disabled soon. " "Obtain and install a new license file to ensure continued " "access to ESA."], "14:170058": ["CRITICAL", "ALRT_FASTVP_LICENSE_EXPIRED_PERIOD", "The FAST VP license has expired, and the storage system's" " support for FAST VP will be disabled soon. Obtain and " "install a new license file to ensure continued support " "for FAST VP."], "14:17005a": ["CRITICAL", "ALRT_ISCSI_LICENSE_EXPIRING", "The Internet Small Computer System Interface (iSCSI) " "license has expired, and the storage system's support for " "iSCSI will be disabled soon. Obtain and install a new " "license file to ensure continued support for iSCSI."], "14:17005c": ["CRITICAL", "ALRT_LOCAL_COPIES_LICENSE_EXPIRING", "The Local Copies license has expired, and the storage " "system's support for local copies (including the ability " "to create snapshots) will be disabled soon. Obtain and " "install a new license file to ensure continued support " "for local copies."], "14:17005d": ["CRITICAL", "ALRT_NFS_LICENSE_EXPIRING", "The NFS license has expired, and the storage system's " "support for the NFS protocol will be disabled soon. " "Obtain and install a new license file to ensure continued " "support for NFS."], "14:17005e": ["CRITICAL", "ALRT_QOS_LICENSE_EXPIRING", "The Quality of Service (QOS) license has expired, and the" " storage system's support for the QOS feature will be " "disabled soon. Obtain and install a new license file to " "ensure continued support for the QOS feature."], "14:17005f": ["CRITICAL", "ALRT_REPLICATION_LICENSE_EXPIRING", "The Replication license has expired, and the storage " "system's support for replication will be disabled soon. " "Obtain and install a new license file to ensure continued " "support for replication."], "14:170060": ["CRITICAL", "ALRT_SCE_LICENSE_EXPIRING", "The Storage Capacity Expansion license has expired, and " "your ability to manage extended storage capacity will be " "disabled soon. Obtain and install a new license file to " "ensure continued access to extended storage capacity."], "14:170061": ["CRITICAL", "ALRT_THIN_PROVISIONING_LICENSE_EXPIRING", "The Thin Provisioning license has expired, and the storage" " system's support for thin provisioning will be disabled " "soon. Obtain and install the license file to ensure " "continued support for thin provisioning."], "14:170062": ["CRITICAL", "ALRT_UNISPHERE_LICENSE_EXPIRING", "The Unisphere license has expired, and the storage system's" " access to Unisphere functionality will be disabled soon. " "Obtain and install a new license file to ensure continued " "access to Unisphere functionality."], "14:170063": ["CRITICAL", "ALRT_UC_LICENSE_EXPIRING", "The Unisphere Central license has expired, and the storage" " system's support for Unisphere Central will be disabled " "soon. Obtain and install a new license file to ensure " "continued support for Unisphere Central."], "14:170064": ["CRITICAL", "ALRT_VMWARE_LICENSE_EXPIRING", "The VMware VASA/VVols license has expired, and the storage" " system's support for VVols will be disabled soon. 
Obtain " "and install a new license file to ensure continued support" " for VVols."], "14:170065": ["CRITICAL", "ALRT_INLINE_COMPRESSION_LICENSE_EXPIRING", "The Inline Compression license has expired, and the storage" " system's support for inline compression will be disabled " "soon. Obtain and install a new license file to ensure " "continued support for inline compression."], "14:180002": ["ERROR", "ALRT_HEALTH_CHECK_NOT_START", "The pre-upgrade health check has failed to start."], "14:180004": ["ERROR", "ALRT_HEALTH_CHECK_FAILED", "The pre-upgrade health check has failed. Check the error " "messages in the Health Check dialog box."], "14:180005": ["ERROR", "ALRT_HEALTH_CHECK_TERMINATED", "The pre-upgrade health check was unexpectedly terminated. " "Try running the health check again."], "14:180007": ["ERROR", "ALRT_UPGRADE_NOT_START", "The software upgrade process failed to start. Check the " "system logs and other alerts to identify the issue. Once " "the issue is fixed, try running the upgrade again."], "14:180008": ["NOTICE", "ALRT_UPGRADE_OK", "The upgrade completed successfully. To access the latest " "management software, you must reload Unisphere. Close any" " browsers opened prior to the upgrade and start a new " "Unisphere login session."], "14:180009": ["ERROR", "ALRT_UPGRADE_FAILED", "The upgrade has failed. Review information about the failed" " upgrade on the Settings screen."], "14:18000a": ["ERROR", "ALRT_UPGRADE_TERMINATED", "The upgrade terminated unexpectedly. Please try running the" " upgrade again."], "14:18000c": ["ERROR", "ALRT_UPGRADE_FAILED", "The upgrade has failed. From Unisphere, click Settings > " "More Configuration > Update Software and review information" " about the failed upgrade."], "14:18000d": ["NOTICE", "ALRT_UPGRADE_SUCCESS", "The upgrade has completed successfully."], "14:18000e": ["NOTICE", "ALRT_UPGRADE_SUCCESS", "The upgrade has completed successfully."], "14:18000f": ["ERROR", "ALRT_UPGRADE_FAILED", "The upgrade has failed. Review information about the failed" " upgrade on the Settings screen."], "14:180010": ["ERROR", "ALRT_UPGRADE_FAILED", "The upgrade has failed. Review information about the " "failed upgrade on the Settings screen."], "14:180011": ["ERROR", "ALRT_UPGRADE_TERMINATED", "The upgrade terminated unexpectedly. Please try running the" " upgrade again."], "14:180012": ["ERROR", "ALRT_UPGRADE_TERMINATED", "The upgrade terminated unexpectedly. Please try running the" " upgrade again."], "14:22001d": ["CRITICAL", "ALRT_STATICPOOL_TRANSACTION_LOG_FAILURE", "System was unable to automatically recover after the " "provisioning operation failed. Contact your service " "provider for assistance with system cleanup."], "14:300007": ["WARNING", "ALRT_DART_FS_OVER_THRESHOLD", "The total number of storage resources has exceeded the " "maximum allowed threshold limit. Delete unneeded snapshots" " or storage resources to free up some space."], "14:30014": ["ERROR", "ALRT_UPGRADE_ECOM_FAILED", "LDAP users and groups may have been lost during the upgrade." " Review the list of LDAP users and groups to determine " "whether any have been deleted. Add missing LDAP users/groups" " again, if necessary."], "14:330009": ["CRITICAL", "ALRT_CONFIG_PSM_RW_FAILED", "The system encountered an error while accessing " "configuration information. Reboot the storage processors " "(SPs) from the Service System page."], "14:380001": ["WARNING", "ALRT_CONTRACT_WILL_EXPIRE", "The = 16 kilobits.
The error " "occurred due to bandwidth setting changes made through the " "VNX UI. Reset the link bandwidth to the default value."], "14:60e7c": ["CRITICAL", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_CONCURRENT_SANCOPY_" "SESSION_DESTINATIONS", "The command failed because one or more failed destinations" " exist on this SAN Copy Session due to concurrent SAN Copy" " sync to different targets. Do not add any new targets to" " the SAN Copy session created by the Unity system. Remove" " any non-Unity targets added to the SAN Copy session to " "recover from the error."], "14:60e7d": ["CRITICAL", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_COMMUNICATING_WITH" "_SNAPVIEW", "A non-recoverable error occurred: An error occurred " "communicating with SnapView."], "14:60e7e": ["CRITICAL", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_INCONSISTENT_STATE", "The session has completed successfully but is in an " "inconsistent state."], "14:60e7f": ["CRITICAL", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_DESTINATION_IN_" "INCONSISTENT_STATE", "A non-recoverable error occurred: The session has completed" " successfully but is in an inconsistent state."], "14:60e80": ["CRITICAL", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_RESUME_ON_AUTO_RECOVERY", "A non-recoverable error occurred: Resume of copy session %2" " failed on auto-recovery."], "14:60e81": ["CRITICAL", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_DUE_TO_ALL_PATHS_FAILURE", "A non-recoverable error occurred: Copy session %2 failed due" " to all paths failure on device with WWN %3."], "14:60e82": ["CRITICAL", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_ACCESS_DENIED_TO_DEVICE", "A non-recoverable error occurred: Access denied to the " "device. (WWN)."], "14:60e83": ["CRITICAL", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_NOT_ENOUGH_MEMORY", "A non-recoverable error occurred: Not enough memory " "resources exist to complete the request."], "14:60e84": ["CRITICAL", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_SOURCE_DEVICE_FAILED", "The source device specified in the session failed. (WWN). " "This can be due to either a Raidgroup or Storage Pool being" " offline or corruption on source LUN on VNX. Verify that the" " source LUN is in a good state. Once the resource is in a" " good state, run Resume of session from Unity UI."], "14:60e85": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_DESTINATION_DEVICE" "_FAILED", "The following target device specified in the session failed." " (WWN). This can be due to the storage pool being offline or" " corruption of the target LUN. Verify that the target LUN is" " in a good state. Once the resource is in a good state, run" " Resume operation of session from Unity UI."], "14:60e86": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_DESTINATION_DEVICE_NOT_" "FOUND", "The destination device could not be found due to either" " incorrect zoning on the switch or the device is not in the " "correct storage group. (WWN). This can be due to FC Zoning " "or iSCSI Connection configuration between VNX and Unity " "arrays. Configure connectivity between all SP pairs between" " the VNX and Unity systems. Once the FC or iSCSI connection" " configuration is validated, run the Verify and Update " "operation for the Remote System connection to the VNX to " "discover/update all."], "14:60e87": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_TARGET_LU_NOT_" "INITIALIZED", "A non-recoverable error occurred: Target LUN list has not" " been initialized yet."], "14:60e88": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_COMMAND_TIMED_OUT", "A non-recoverable error occurred: The command timed out " "waiting on another SAN Copy operation to complete."], "14:60e89": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_VERIFYING_FRONT_END_" "DEVICE_TIMEDOUT", "A non-recoverable error occurred: Verifying front end devic" "e timed out."], "14:60e8a": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_VERIFYING_FRONT_END_" "DEVICE_TIMEDOUT_ANOTHER_OPERATION", "A non-recoverable error occurred: Verifying front end device" " timed out waiting for another front end operation to " "complete."], "14:60e8b": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_VERIFYING_SOURCE_" "CONNECTIVITY_TIMEDOUT", "A non-recoverable error occurred: Operation timed out " "trying to verify the connectivity to the source device."], "14:60e8c": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_VERIFYING_DESTINATION" "_CONNECTIVITY_TIMEDOUT", "A non-recoverable error occurred: Operation timed out " "trying to verify the connectivity to the target device."], "14:60e8d": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_RLP_LUN_IO_FAILURE", "A non-recoverable error occurred: Operation failed due to " "an unrecoverable I/O failure of a reserved LUN."], "14:60e8e": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_LIMIT_OF_TOTAL_" "SESSIONS_FOR_SANCOPYE_REACHED", "A non-recoverable error occurred: This copy session could " "not be created because the limit of total sessions for " "SAN Copy/E has been reached."], "14:60e8f": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_LIMIT_OF_INCREMENTAL" "_SESSIONS_FOR_SANCOPYE_REACHED", "This copy session could not be created because the limit " "of incremental sessions for SAN Copy/E has been reached. " "Resolve the limit issue by deleting an existing incremental " "session related to systems other than the Unity system or " "remove some MirrorView/A sessions from the system. Once the" " limit issue is resolved, run the resume operation on the " "import session from the Unity system."], "14:60e90": ["INFO", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_COPY_COMMAND_QUEUED", "Copy command is queued due to SAN Copy concurrent sync " "limits interference from a VNX administrator scheduled" " start and the Unity scheduled start. Stop or abort any SAN" " Copy starts issued on VNX systems on imports happening to" " non-Unity systems."], "14:60e91": ["INFO", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_ON_SOURCE_OR" "_DESTINATIONS", "The session failed because either the source or all targets" " have failed due to failure status on the source or target" " device of the SAN Copy session. Log in to the VNX system " "and resolve the SAN Copy error reported for this element " "session and resume the SAN Copy session from the VNX UI."], "14:60e92": ["INFO", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_DEVICE_CANNOT_BE_LOCATED", "Element import session related SAN Copy session ran into " "error: 0x712A0030: Unable to locate the device. Check that " "the device with this WWN exists. Session ran into a non-" "recoverable error. Please collect support materials from " "both VNX and Unity system. Report an issue with EMC Support" " for resolution.
Please cancel the session."], "14:60e93": ["WARNING", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_NO_UNUSED_RLP_LUNS", "There are no unused LUNs available in the reserved LUN pool" " (RLP) for session create or start. Add LUNs to the RLP" " pool, then resume the import session operation."], "14:60e94": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_RLP_LUN_NOT_SUPPORT_" "INCREMENTAL_SESSIONS", "A non-recoverable error occurred: Existing reserved LUN does" " not support incremental sessions."], "14:60e95": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_SNAPVIEW_RESERVED_" "LUN_NOT_ENOUGH_SPACE", "A non-recoverable error occurred: A SnapView reserved LUN " "did not have sufficient space for the minimum map regions."], "14:60e96": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_TOO_MANY_SNAPSHOTS_ON_" "SINGLE_LU", "A non-recoverable error occurred: Too many snapshots have" " been created on a single source LUN."], "14:60e97": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_CANNOT_OPEN_RESERVED_LUN", "A non-recoverable error occurred: The reserved LUN cannot" " be opened."], "14:60e98": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_CANNOT_GET_RESERVED_" "LUN_INFO", "A non-recoverable error has occurred: Unable to get the " "geometry information for reserved LUN."], "14:60e99": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_NO_SPACE_ON_RLP", "No more room exists in the reserved LUN pool (RLP). An RLP" " LUN or space is unavailable to create or start a session." " Add LUNs to the RLP pool, then resume the operation."], "14:60e9a": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_TOTAL_NUMBER_SUPPORTED_" "INCREMENTAL_SESSIONS_REACHED", "This incremental copy session could not be created because " "the maximum incremental SAN Copy sessions limit on the VNX" " has been reached. The limit is shared with the MirrorView " "Async feature. Resolve the limit issue by removing an" " unwanted or unused SAN Copy session related to systems " "other than the Unity system or remove some MirrorView/A " "sessions from the system. Once the limit issue is resolved," " run the Resume operation on the import session from the " "Unity system."], "14:60e9b": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_LIMIT_OF_TOTAL_SANCOPY" "_SESSIONS_REACHED", "This incremental copy session could not be created because" " the maximum incremental SAN Copy sessions limit on the" " VNX has been reached. The limit is shared with the " "MirrorView Async feature. Resolve the limit issue by" " removing an unwanted or unused SAN Copy session related " "to systems other than the Unity system or remove some " "MirrorView/A sessions from the system. Once the limit " "issue is resolved, run the Resume operation on the import " "session from the Unity system."], "14:60e9c": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_LIMIT_OF_TOTAL_I" "NCREMENTAL_SANCOPY_SESSIONS_REACHED", "This incremental copy session could not be created " "because the maximum incremental SAN Copy sessions limit " "on the VNX has been reached. The limit is shared with the " "MirrorView Async feature. Resolve the limit issue by " "removing an unwanted or unused SAN Copy session related to " "systems other than the Unity system or remove some" " MirrorView/A sessions from the system. Once the limit " "issue is resolved, run the Resume operation on the import" " session from the Unity system."], "14:60e9d": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_LOST_COMMUNICATION", "Communication with the source array has been lost. On the " "Remote System Connection page, click Verify and Update" " Connection. If that does not correct the issue, verify " "that the physical network is operational."], "14:60e9e": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_RLP_MAXIMUM_DEVICES", "The reserved LUN pool (RLP) has its maximum number of " "devices. An RLP LUN or space is unavailable to create or" " start a session. Add LUNs to the RLP pool, then resume " "the operation."], "14:60e9f": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_NO_CACHE_DEVICES", "The user attempted to start a session without cache devices." " A reserved LUN pool (RLP) LUN or space is unavailable to " "create or start a session. Add LUNs to the RLP pool, then " "resume the operation."], "14:60ea0": ["CRITICAL", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_TARGET_" "INSUFFICIENT_SPACE", "Failed to write to target device due to insufficient " "storage space, which can be caused by a pool out of space or" " target device error state on the Unity system. Verify the " "condition of the target device, or pool, or both. Add or" " free storage space in the pool, or correct the resource " "state, or both and then resume the operation."], "14:60ea1": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_DEVICE_NOT_READY", "Element import session related to a SAN Copy session failed" " because the device is not ready. One cause can be a reboot" " of the VNX system, which would cause the SAN Copy session" " to go to the paused state. Resolve the VNX reboot issue " "and verify that the source LUN or LUNs are completely " "recovered. Then from the Unity console, run the Resume " "operation on the import session to recover."], "14:60ea2": ["CRITICAL", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_COMMUNICATING_WITH_" "SNAPVIEW_1", "A non-recoverable error occurred: An error occurred " "communicating with SnapView."], "14:60ea3": ["CRITICAL", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_SOURCE_DEVICE" "_UNAVAILABLE", "Element import session related SAN Copy session failed " "because the source device is unavailable for IO operations." " Ensure that the device is not a MirrorView secondary image," " a SnapView Clone, an inactive Snapshot, or a detached or" " offline VNX Snapshot Mount Point. If the session still" " fails, gather SPcollects and contact your service " "provider."], "14:60ea4": ["WARNING", "ALRT_IMPORT_REMOTE_SYSTEM_DEGRADED", "Connection to one of the remote VNX system management " "IP addresses cannot be made. Check and restore connectivity " "to both management IP addresses on the VNX system."], "14:60ea5": ["ERROR", "ALRT_ELEMENT_IMPORT_SESSION_FAILED_SOURCE_IN_IMPORT_SESSION", "The import session could not be created because the source" " resource is already in an import session. Resolve the issue" " by removing the SAN Copy session for this resource on the" " VNX. Once the issue is resolved, wait for a few minutes and" " run the Resume operation on the import session from the" " Unity system."], "14:60f00": ["INFO", "ALRT_ROUTE_OK", "The component is operating normally."], "14:60f01": ["ERROR", "ALRT_ROUTE_INVALID_IP_VERSION", "There is an IPv4/IPv6 mismatch between the network route's" " destination and/or gateway, and the source IP interface." " Edit the destination and/or gateway attributes of the" " route."], "14:60f02": ["ERROR", "ALRT_ROUTE_SRC_IP_NOT_FOUND", "The source IP interface of the network route does not " "exist."], "14:60f03": ["ERROR", "ALRT_ROUTE_DIFF_SUBNET", "The gateway of the network route is inaccessible, because it" " is not on the same subnet as the source interface. Modify" " the attributes of the network route or source interface to " "associate them with the same subnet."], "14:60f04": ["ERROR", "ALRT_ROUTE_NOT_OPERATIONAL", "The network route is not operational. Delete the route and" " create a new one, if necessary."], "14:61008c": ["NOTICE", "ALRT_MIGRATION_SESSION_CUTOVER_THRESHOLD_PERCENTAGE" "_REMAINING", "Import session reached cutover threshold and is cutover " "ready."], "14:62001f": ["WARNING", "ALRT_QOS_MAX_PERF_CLASSES", "Maximum number of I/O limit resources has been reached."], "14:640001": ["CRITICAL", "ALRT_UDOCTOR_FAIL_TO_START", "An error has occurred that is preventing the UDoctor " "service from starting up. Contact your service provider."], "14:640002": ["ERROR", "ALRT_UDOCTOR_GENERAL_ALERT", "The UDoctor service has detected an error and generated" " this alert. For more information, refer to the relevant" " knowledgebase article on the support website or contact " "your service provider."], "14:640003": ["CRITICAL", "ALRT_UDOCTOR_CRITICAL_ALERT", "The UDoctor service has detected an error and generated " "this alert. For more information, refer to the relevant " "knowledgebase article on the support website or contact " "your service provider."], "14:70001": ["ERROR", "ALRT_SEND_FAILED", "The storage system failed to communicate an event message " "via the Email server, SNMP servers, or ESRS gateway or " "servers. Resolve the problem with the Email, ESRS, or " "SNMP servers."], "14:70003": ["ERROR", "ALRT_BAD_PRVCY", "Set the SNMP privacy protocol to one of the valid values: " "DES or AES."], "14:70004": ["ERROR", "ALRT_BAD_AUTH", "Set the SNMP authentication protocol to one of the valid " "values: MD5 or SHA."], "14:80001": ["INFO", "DESC_TEST_UI_ALERT", "This is a test message to be shown in a UI pop-up."], "14:90001": ["INFO", "DESC_TEST_PHONE_HOME_ALERT", "This is a test message to be sent via ConnectHome."], "201:20000": ["INFO", "ALRT_NTP_OK", "The system can now reach the NTP server."], "201:20001": ["WARNING", "ALRT_NTP_PART_NO_CONNECT", "The system has a partial connection to the NTP server."], "201:20002": ["ERROR", "ALRT_NTP_NO_CONNECT", "The system could not connect to the Time Server (NTP)." " Check your NTP settings."], "301:24001": ["WARNING", "ALRT_STORAGE_SERVER_RESTART", "The NAS servers that are configured to run on this Storage" " Processor (SP) have stopped and will be automatically " "restarted. This may affect host connections, which may" " need to be reconnected to your storage resources. If the " "problem persists, contact your service provider."], "301:30000": ["INFO", "ALRT_CONTROLLED_REBOOT_START", "This Storage Processor (SP) is currently rebooting. No " "action is required."], "301:30001": ["INFO", "ALRT_CONTROLLED_REBOOT_FINISHED", "The Storage Processor (SP) has finished rebooting. No " "action is required."], "301:30002": ["INFO", "ALRT_CONTROLLED_SERVICE_START", "This Storage Processor (SP) is currently rebooting into " "Service Mode. No action is required."], "301:3000e": ["INFO", "ALRT_CONTROLLED_SYSTEMSHUTDOWN_START", "The Storage Processor (SP) is shutting down. The shut down " "and power up procedure must be performed in a particular" " order. If you have not already printed the power up" " instructions, go to the EMC Support website to locate " "product documentation."], "301:30010": ["NOTICE", "PLATFORM_HARDWARE_PERSIST_STARTED", "A hardware commit operation has started. The system may " "reboot multiple times.
Please do not interrupt this" " process."], "301:30011": ["NOTICE", "PLATFORM_HARDWARE_COMMIT_COMPLETE", "Your new hardware configuration has been committed and is " "now ready for use."], "301:3001a": ["INFO", "ALRT_VVNX_CONTROLLED_SYSTEMSHUTDOWN_START", "The Storage Processor (SP) is shutting down."], "301:31004": ["ERROR", "ALRT_DEBUG_PROCESS_CRASH", "A process crashed, and it might impact the whole system." " Please check the core dump by running svc_dc -lcd after " "data collection is complete (it usually takes several" " minutes)."], "301:32001": ["ERROR", "PLATFORM_HARDWARE_COMMIT_FAILED", "The hardware configuration could not be committed. Please" " try again."], "301:40001": ["INFO", "ALRT_METRICS_DB_RECOVERED", "Performance metrics are available now. No action is " "required."], "301:48000": ["ERROR", "ALRT_METRICS_DB_FAIL", "Performance metrics are unavailable due to a system error." " Contact your service provider."] } IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Input/output operations per second" } READ_IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Read input/output operations per second" } WRITE_IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Write input/output operations per second" } THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data is " "successfully transferred in MB/s" } READ_THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data read is " "successfully transferred in MB/s" } WRITE_THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data write is " "successfully transferred in MB/s" } RESPONSE_TIME_DESCRIPTION = { "unit": "ms", "description": "Average time taken for an IO " "operation in ms" } CACHE_HIT_RATIO_DESCRIPTION = { "unit": "%", "description": "Percentage of IO operations that are cache hits" } READ_CACHE_HIT_RATIO_DESCRIPTION = { "unit": "%", "description": "Percentage of read ops that are cache hits" } WRITE_CACHE_HIT_RATIO_DESCRIPTION = { "unit": "%", "description": "Percentage of write ops that are cache hits" } IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of IO requests in KB" } READ_IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of read IO requests in KB" } WRITE_IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of write IO requests in KB" } CPU_USAGE_DESCRIPTION = { "unit": "%", "description": "Percentage of CPU usage" } MEMORY_USAGE_DESCRIPTION = { "unit": "%", "description": "Percentage of memory usage" } SERVICE_TIME = { "unit": 'ms', "description": "Service time of the resource in ms" } VOLUME_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, "ioSize": IO_SIZE_DESCRIPTION, "readIoSize": READ_IO_SIZE_DESCRIPTION, "writeIoSize": WRITE_IO_SIZE_DESCRIPTION } PORT_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION } DISK_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION } FILESYSTEM_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "ioSize": IO_SIZE_DESCRIPTION, "readIoSize": READ_IO_SIZE_DESCRIPTION, "writeIoSize": WRITE_IO_SIZE_DESCRIPTION }
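# --- Editorial sketch (hypothetical, not part of the original module): the
# *_CAP dicts above describe which metrics each resource type can report.
# A consumer might fold them into a single lookup, roughly like this;
# RESOURCE_METRIC_CAPABILITIES and list_supported_metrics are illustrative
# names, not delfin APIs.
RESOURCE_METRIC_CAPABILITIES = {
    "volume": VOLUME_CAP,
    "port": PORT_CAP,
    "disk": DISK_CAP,
    "filesystem": FILESYSTEM_CAP,
}


def list_supported_metrics(resource_type):
    """Return {metric_name: unit} for the given resource type."""
    caps = RESOURCE_METRIC_CAPABILITIES.get(resource_type, {})
    return {name: desc["unit"] for name, desc in caps.items()}

# e.g. list_supported_metrics("disk")["responseTime"] == "ms"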
"responseTime": RESPONSE_TIME_DESCRIPTION } FILESYSTEM_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "ioSize": IO_SIZE_DESCRIPTION, "readIoSize": READ_IO_SIZE_DESCRIPTION, "writeIoSize": WRITE_IO_SIZE_DESCRIPTION } ================================================ FILE: delfin/drivers/dell_emc/unity/rest_handler.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import threading import requests import six from oslo_log import log as logging from delfin import cryptor from delfin import exception from delfin.drivers.dell_emc.unity import consts from delfin.drivers.utils.rest_client import RestClient LOG = logging.getLogger(__name__) class RestHandler(RestClient): REST_AUTH_URL = '/api/types/loginSessionInfo/instances' REST_STORAGE_URL = '/api/types/system/instances' REST_CAPACITY_URL = '/api/types/systemCapacity/instances' REST_SOFT_VERSION_URL = '/api/types/installedSoftwareVersion/instances' REST_LUNS_URL = '/api/types/lun/instances' REST_POOLS_URL = '/api/types/pool/instances' REST_ALERTS_URL = '/api/types/alert/instances' REST_DEL_ALERTS_URL = '/api/instances/alert/' REST_LOGOUT_URL = '/api/types/loginSessionInfo/action/logout' AUTH_KEY = 'EMC-CSRF-TOKEN' REST_CONTROLLER_URL = '/api/types/storageProcessor/instances' REST_DISK_URL = '/api/types/disk/instances' REST_FCPORT_URL = '/api/types/fcPort/instances' REST_ETHPORT_URL = '/api/types/ethernetPort/instances' REST_IP_URL = '/api/types/ipInterface/instances' REST_FILESYSTEM_URL = '/api/types/filesystem/instances' REST_NFSSHARE_URL = '/api/types/nfsShare/instances' REST_CIFSSHARE_URL = '/api/types/cifsShare/instances' REST_QTREE_URL = '/api/types/treeQuota/instances' REST_USERQUOTA_URL = '/api/types/userQuota/instances' REST_QUOTACONFIG_URL = '/api/types/quotaConfig/instances' REST_VIRTUAL_DISK_URL = '/api/types/virtualDisk/instances' STATE_SOLVED = 2 def __init__(self, **kwargs): super(RestHandler, self).__init__(**kwargs) self.session_lock = threading.Lock() def login(self): """Login dell_emc unity storage array.""" try: with self.session_lock: if self.session is None: self.init_http_head() self.session.headers.update({"X-EMC-REST-CLIENT": "true"}) self.session.auth = requests.auth.HTTPBasicAuth( self.rest_username, cryptor.decode(self.rest_password)) res = self.call_with_token(RestHandler.REST_AUTH_URL) if res.status_code == 200: self.session.headers[RestHandler.AUTH_KEY] = \ cryptor.encode(res.headers[RestHandler.AUTH_KEY]) else: LOG.error("Login error.URL: %s,Reason: %s.", RestHandler.REST_AUTH_URL, res.text) if 'Unauthorized' in res.text: raise exception.InvalidUsernameOrPassword() elif 'Forbidden' in res.text: raise exception.InvalidIpOrPort() else: raise exception.StorageBackendException( six.text_type(res.text)) except Exception as e: LOG.error("Login error: %s", 
six.text_type(e)) raise e def call_with_token(self, url, data=None, method='GET', calltimeout=consts.DEFAULT_TIMEOUT): auth_key = None if self.session: auth_key = self.session.headers.get(RestHandler.AUTH_KEY, None) if auth_key: self.session.headers[RestHandler.AUTH_KEY] \ = cryptor.decode(auth_key) res = self.do_call(url, data, method, calltimeout) if auth_key: self.session.headers[RestHandler.AUTH_KEY] = auth_key return res def logout(self): try: if self.san_address: self.call(RestHandler.REST_LOGOUT_URL, None, 'POST') if self.session: self.session.close() except Exception as e: err_msg = "Logout error: %s" % (six.text_type(e)) LOG.error(err_msg) raise e def get_rest_info(self, url, data=None, method='GET', calltimeout=consts.DEFAULT_TIMEOUT): retry_times = consts.REST_RETRY_TIMES while retry_times >= 0: try: res = self.call(url, data, method, calltimeout) if res.status_code == 200: return res.json() err_msg = "rest response abnormal,status_code:%s,res.json:%s" \ % (res.status_code, res.json()) LOG.error(err_msg) except Exception as e: LOG.error(e) retry_times -= 1 return None def call(self, url, data=None, method='GET', calltimeout=consts.DEFAULT_TIMEOUT): try: res = self.call_with_token(url, data, method, calltimeout) if res.status_code == 401: LOG.error("Failed to get token, status_code:%s,error_mesg:%s" % (res.status_code, res.text)) self.login() res = self.call_with_token(url, data, method, calltimeout) elif res.status_code == 503: raise exception.InvalidResults(res.text) return res except Exception as e: LOG.error("Method:%s,url:%s failed: %s" % (method, url, six.text_type(e))) raise e def get_all_pools(self): url = '%s?%s' % (RestHandler.REST_POOLS_URL, 'fields=id,name,health,type,sizeFree,' 'sizeTotal,sizeUsed,sizeSubscribed') result_json = self.get_rest_info(url) return result_json def get_storage(self): url = '%s?%s' % (RestHandler.REST_STORAGE_URL, 'fields=name,model,serialNumber,health') result_json = self.get_rest_info(url) return result_json def get_capacity(self): url = '%s?%s' % (RestHandler.REST_CAPACITY_URL, 'fields=sizeFree,sizeTotal,sizeUsed,' 'sizeSubscribed,totalLogicalSize') result_json = self.get_rest_info(url) return result_json def get_soft_version(self): url = '%s?%s' % (RestHandler.REST_SOFT_VERSION_URL, 'fields=version') result_json = self.get_rest_info(url) return result_json def get_all_luns(self, page_number): url = '%s?%s&page=%s' % (RestHandler.REST_LUNS_URL, 'fields=id,name,health,type,sizeAllocated,' 'sizeTotal,sizeUsed,pool,wwn,isThinEnabled', page_number) result_json = self.get_rest_info(url) return result_json def get_all_alerts(self, page_number): url = '%s?%s&page=%s' % (RestHandler.REST_ALERTS_URL, 'fields=id,timestamp,severity,component,' 'messageId,message,description,' 'descriptionId,state', page_number) result_json = self.get_rest_info( url, None, 'GET', consts.ALERT_TIMEOUT) return result_json def get_all_alerts_without_state(self, page_number): url = '%s?%s&page=%s' % (RestHandler.REST_ALERTS_URL, 'fields=id,timestamp,severity,component,' 'messageId,message,description,' 'descriptionId', page_number) result_json = self.get_rest_info( url, None, 'GET', consts.ALERT_TIMEOUT) return result_json def remove_alert(self, alert_id): data = {"state": RestHandler.STATE_SOLVED} url = '%s%s/action/modify' % (RestHandler.REST_DEL_ALERTS_URL, alert_id) result_json = self.get_rest_info(url, data, method='POST') return result_json def get_all_controllers(self): url = '%s?%s' % (RestHandler.REST_CONTROLLER_URL, 'fields=id,name,health,model,slotNumber,' 
'manufacturer,memorySize') result_json = self.get_rest_info(url) return result_json def get_all_disks(self): url = '%s?%s' % (RestHandler.REST_DISK_URL, 'fields=id,name,health,model,slotNumber,' 'manufacturer,version,emcSerialNumber,wwn,' 'rpm,size,diskGroup,diskTechnology') result_json = self.get_rest_info(url) return result_json def get_all_fcports(self): url = '%s?%s' % (RestHandler.REST_FCPORT_URL, 'fields=id,name,health,slotNumber,storageProcessor,' 'currentSpeed,wwn') result_json = self.get_rest_info(url) return result_json def get_all_ethports(self): url = '%s?%s' % (RestHandler.REST_ETHPORT_URL, 'fields=id,name,health,portNumber,storageProcessor,' 'speed,isLinkUp,macAddress') result_json = self.get_rest_info(url) return result_json def get_port_interface(self): url = '%s?%s' % (RestHandler.REST_IP_URL, 'fields=id,ipPort,ipProtocolVersion,' 'ipAddress,netmask') result_json = self.get_rest_info(url) return result_json def get_all_filesystems(self): url = '%s?%s' % (RestHandler.REST_FILESYSTEM_URL, 'fields=id,name,health,sizeAllocated,accessPolicy,' 'sizeTotal,sizeUsed,isThinEnabled,pool,flrVersion') result_json = self.get_rest_info(url) return result_json def get_all_filesystems_without_flr(self): url = '%s?%s' % (RestHandler.REST_FILESYSTEM_URL, 'fields=id,name,health,sizeAllocated,accessPolicy,' 'sizeTotal,sizeUsed,isThinEnabled,pool') result_json = self.get_rest_info(url) return result_json def get_all_nfsshares(self): url = '%s?%s' % (RestHandler.REST_NFSSHARE_URL, 'fields=id,filesystem,name,path') result_json = self.get_rest_info(url) return result_json def get_all_cifsshares(self): url = '%s?%s' % (RestHandler.REST_CIFSSHARE_URL, 'fields=id,filesystem,name,path') result_json = self.get_rest_info(url) return result_json def get_all_qtrees(self): url = '%s?%s' % (RestHandler.REST_QTREE_URL, 'fields=id,filesystem,description,path,hardLimit,' 'softLimit,sizeUsed,quotaConfig') result_json = self.get_rest_info(url) return result_json def get_all_userquotas(self): url = '%s?%s' % (RestHandler.REST_USERQUOTA_URL, 'fields=id,filesystem,hardLimit,softLimit,' 'sizeUsed,treeQuota,uid') result_json = self.get_rest_info(url) return result_json def get_quota_configs(self): url = '%s?%s' % (RestHandler.REST_QUOTACONFIG_URL, 'fields=id,filesystem,treeQuota,quotaPolicy') result_json = self.get_rest_info(url) return result_json def get_host_initiators(self, page): url = '/api/types/hostInitiator/instances?%s&page=%s' % \ ('fields=id,health,type,parentHost,initiatorId', page) result_json = self.get_rest_info(url) return result_json def get_all_hosts(self, page): url = '/api/types/host/instances?%s&page=%s' \ % ('fields=id,health,name,description,osType', page) result_json = self.get_rest_info(url) return result_json def get_host_ip(self): url = '/api/types/hostIPPort/instances?%s' % \ ('fields=id,name,address,netmask,host') result_json = self.get_rest_info(url) return result_json def get_host_lun(self, page): url = '/api/types/hostLUN/instances?%s&page=%s' % \ ('fields=id,host,lun', page) result_json = self.get_rest_info(url) return result_json def get_history_metrics(self, path, page): url = '/api/types/metricValue/instances?filter=path EQ "%s"&page=%s'\ % (path, page) result_json = self.get_rest_info(url) return result_json def get_virtual_disks(self): url = '%s?%s' % (RestHandler.REST_VIRTUAL_DISK_URL, 'fields=health,name,spaScsiId,tierType,sizeTotal,' 'id,model,manufacturer,wwn') result_json = self.get_rest_info(url) return result_json ================================================ FILE: 
delfin/drivers/dell_emc/unity/unity.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import six from oslo_log import log from oslo_utils import units from delfin import exception from delfin.common import constants from delfin.drivers import driver from delfin.drivers.dell_emc.unity import rest_handler, alert_handler, consts from delfin.drivers.dell_emc.unity.alert_handler import AlertHandler LOG = log.getLogger(__name__) class UnityStorDriver(driver.StorageDriver): """UnityStorDriver implement the DELL EMC Storage driver""" HEALTH_OK = (5, 7) STORAGE_STATUS_MAP = {5: constants.StorageStatus.NORMAL, 7: constants.StorageStatus.NORMAL, 15: constants.StorageStatus.NORMAL, 20: constants.StorageStatus.NORMAL, 10: constants.StorageStatus.DEGRADED } FILESYSTEM_FLR_MAP = {0: constants.WORMType.NON_WORM, 1: constants.WORMType.ENTERPRISE, 2: constants.WORMType.COMPLIANCE } FILESYSTEM_SECURITY_MAP = {0: constants.NASSecurityMode.NATIVE, 1: constants.NASSecurityMode.UNIX, 2: constants.NASSecurityMode.NTFS } CONTROLLER_STATUS_MAP = {5: constants.ControllerStatus.NORMAL, 7: constants.ControllerStatus.NORMAL, 10: constants.ControllerStatus.DEGRADED } DISK_TYPE_MAP = {1: constants.DiskPhysicalType.SAS, 2: constants.DiskPhysicalType.NL_SAS, 6: constants.DiskPhysicalType.FLASH, 7: constants.DiskPhysicalType.FLASH, 8: constants.DiskPhysicalType.FLASH, 9: constants.DiskPhysicalType.FLASH, 99: constants.DiskPhysicalType.VMDISK } VOLUME_PERF_METRICS = { 'readIops': 'sp.*.storage.lun.*.readsRate', 'writeIops': 'sp.*.storage.lun.*.writesRate', 'readThroughput': 'sp.*.storage.lun.*.readBytesRate', 'writeThroughput': 'sp.*.storage.lun.*.writeBytesRate', 'responseTime': 'sp.*.storage.lun.*.responseTime', 'readIoSize': 'sp.*.storage.lun.*.avgReadSize', 'writeIoSize': 'sp.*.storage.lun.*.avgWriteSize' } DISK_PERF_METRICS = { 'readIops': 'sp.*.physical.disk.*.readsRate', 'writeIops': 'sp.*.physical.disk.*.writesRate', 'readThroughput': 'sp.*.physical.disk.*.readBytesRate', 'writeThroughput': 'sp.*.physical.disk.*.writeBytesRate', 'responseTime': 'sp.*.physical.disk.*.responseTime' } ETHERNET_PORT_METRICS = { 'readThroughput': 'sp.*.net.device.*.bytesInRate', 'writeThroughput': 'sp.*.net.device.*.bytesOutRate', 'readIops': 'sp.*.net.device.*.pktsInRate', 'writeIops': 'sp.*.net.device.*.pktsOutRate', } FC_PORT_METRICS = { 'readIops': 'sp.*.fibreChannel.fePort.*.readsRate', 'writeIops': 'sp.*.fibreChannel.fePort.*.writesRate', 'readThroughput': 'sp.*.fibreChannel.fePort.*.readBytesRate', 'writeThroughput': 'sp.*.fibreChannel.fePort.*.writeBytesRate' } ISCSI_PORT_METRICS = { 'readIops': 'sp.*.iscsi.fePort.*.readsRate', 'writeIops': 'sp.*.iscsi.fePort.*.writesRate', 'readThroughput': 'sp.*.iscsi.fePort.*.readBytesRate', 'writeThroughput': 'sp.*.iscsi.fePort.*.writeBytesRate' } FILESYSTEM_PERF_METRICS = { 'readIops': 'sp.*.storage.filesystem.*.readsRate', 'writeIops': 'sp.*.storage.filesystem.*.writesRate', 'readThroughput': 
'sp.*.storage.filesystem.*.readBytesRate', 'writeThroughput': 'sp.*.storage.filesystem.*.writeBytesRate', 'readIoSize': 'sp.*.storage.filesystem.*.readSizeAvg', 'writeIoSize': 'sp.*.storage.filesystem.*.writeSizeAvg' } PERF_TYPE_MAP = { 'readIops': {'write': 'writeIops', 'total': 'iops'}, 'readThroughput': {'write': 'writeThroughput', 'total': 'throughput'}, 'readIoSize': {'write': 'writeIoSize', 'total': 'ioSize'}, } MS_PER_HOUR = 60 * 60 * 1000 OS_TYPE_MAP = {'AIX': constants.HostOSTypes.AIX, 'Citrix XenServer': constants.HostOSTypes.XEN_SERVER, 'HP-UX': constants.HostOSTypes.HP_UX, 'IBM VIOS': constants.HostOSTypes.UNKNOWN, 'Linux': constants.HostOSTypes.LINUX, 'Mac OS': constants.HostOSTypes.MAC_OS, 'Solaris': constants.HostOSTypes.SOLARIS, 'VMware ESXi': constants.HostOSTypes.VMWARE_ESX, 'Windows Client': constants.HostOSTypes.WINDOWS, 'Windows Server': constants.HostOSTypes.WINDOWS } INITIATOR_STATUS_MAP = {5: constants.InitiatorStatus.ONLINE, 7: constants.InitiatorStatus.ONLINE, 15: constants.InitiatorStatus.ONLINE, 20: constants.InitiatorStatus.ONLINE, 10: constants.InitiatorStatus.OFFLINE } HOST_STATUS_MAP = {5: constants.HostStatus.NORMAL, 7: constants.HostStatus.NORMAL, 15: constants.HostStatus.NORMAL, 20: constants.HostStatus.NORMAL, 10: constants.HostStatus.DEGRADED } INITIATOR_TYPE_MAP = {0: constants.InitiatorType.UNKNOWN, 1: constants.InitiatorType.FC, 2: constants.InitiatorType.ISCSI } def __init__(self, **kwargs): super().__init__(**kwargs) self.rest_handler = rest_handler.RestHandler(**kwargs) self.rest_handler.login() def reset_connection(self, context, **kwargs): self.rest_handler.logout() self.rest_handler.verify = kwargs.get('verify', False) self.rest_handler.login() def close_connection(self): self.rest_handler.logout() def get_disk_capacity(self, context): raw_capacity = 0 try: disk_info = self.list_disks(context) if disk_info: for disk in disk_info: raw_capacity += disk.get('capacity') except Exception: LOG.info("get disk info fail in get_disk_capacity") return raw_capacity def get_storage(self, context): system_info = self.rest_handler.get_storage() capacity = self.rest_handler.get_capacity() version_info = self.rest_handler.get_soft_version() if not system_info or not capacity: err_msg = "unity get system or capacity info failed" LOG.error(err_msg) raise exception.StorageBackendException(err_msg) system_entries = system_info.get('entries') for system in system_entries: content = system.get('content', {}) name = content.get('name') model = content.get('model') serial_number = content.get('serialNumber') health_value = content.get('health', {}).get('value') status = UnityStorDriver.STORAGE_STATUS_MAP.get( health_value, constants.StorageStatus.ABNORMAL) break capacity_info = capacity.get('entries') for per_capacity in capacity_info: content = per_capacity.get('content', {}) free = content.get('sizeFree') total = content.get('sizeTotal') used = content.get('sizeUsed') subs = content.get('sizeSubscribed') break if version_info: soft_version = version_info.get('entries') for soft_info in soft_version: content = soft_info.get('content', {}) if content: version = content.get('id') break raw_capacity = self.get_disk_capacity(context) raw_capacity = raw_capacity if raw_capacity else int(total) system_result = { 'name': name, 'vendor': 'DELL EMC', 'model': model, 'status': status, 'serial_number': serial_number, 'firmware_version': version, 'location': '', 'subscribed_capacity': int(subs), 'total_capacity': int(total), 'raw_capacity': raw_capacity, 'used_capacity': 
int(used), 'free_capacity': int(free) } return system_result def list_storage_pools(self, context): pool_info = self.rest_handler.get_all_pools() pool_list = [] pool_type = constants.StorageType.UNIFIED if pool_info is not None: pool_entries = pool_info.get('entries') for pool in pool_entries: content = pool.get('content', {}) health_value = content.get('health').get('value') if health_value in UnityStorDriver.HEALTH_OK: status = constants.StorageStatus.NORMAL else: status = constants.StorageStatus.ABNORMAL pool_result = { 'name': content.get('name'), 'storage_id': self.storage_id, 'native_storage_pool_id': str(content.get('id')), 'description': content.get('description'), 'status': status, 'storage_type': pool_type, 'total_capacity': int(content.get('sizeTotal')), 'subscribed_capacity': int(content.get('sizeSubscribed')), 'used_capacity': int(content.get('sizeUsed')), 'free_capacity': int(content.get('sizeFree')) } pool_list.append(pool_result) return pool_list def volume_handler(self, volumes, volume_list): if volumes is not None: vol_entries = volumes.get('entries') for volume in vol_entries: content = volume.get('content', {}) total = content.get('sizeTotal') used = content.get('sizeAllocated') vol_type = constants.VolumeType.THICK if content.get('isThinEnabled') is True: vol_type = constants.VolumeType.THIN health_value = content.get('health').get('value') if health_value in UnityStorDriver.HEALTH_OK: status = constants.StorageStatus.NORMAL else: status = constants.StorageStatus.ABNORMAL volume_result = { 'name': content.get('name'), 'storage_id': self.storage_id, 'description': content.get('description'), 'status': status, 'native_volume_id': str(content.get('id')), 'native_storage_pool_id': content.get('pool').get('id'), 'wwn': content.get('wwn'), 'type': vol_type, 'total_capacity': int(total), 'used_capacity': int(used), 'free_capacity': int(total - used) } volume_list.append(volume_result) def list_volumes(self, context): page_number = 1 volume_list = [] while True: luns = self.rest_handler.get_all_luns(page_number) if luns is None: break if 'entries' not in luns: break if len(luns['entries']) < 1: break self.volume_handler(luns, volume_list) page_number = page_number + 1 return volume_list def list_alerts(self, context, query_para=None): page_number = 1 alert_model_list = [] while True: alert_list = self.rest_handler.get_all_alerts(page_number) if not alert_list: alert_list = self.rest_handler.get_all_alerts_without_state( page_number) if alert_list is None: break if 'entries' not in alert_list: break if len(alert_list['entries']) < 1: break alert_handler.AlertHandler() \ .parse_queried_alerts(alert_model_list, alert_list, query_para) page_number = page_number + 1 return alert_model_list def list_controllers(self, context): try: controller_list = [] controller_info = self.rest_handler.get_all_controllers() if controller_info is not None: pool_entries = controller_info.get('entries') for pool in pool_entries: content = pool.get('content') if not content: continue health_value = content.get('health', {}).get('value') status = UnityStorDriver.CONTROLLER_STATUS_MAP.get( health_value, constants.ControllerStatus.FAULT ) controller_result = { 'name': content.get('name'), 'storage_id': self.storage_id, 'native_controller_id': content.get('id'), 'status': status, 'location': content.get('slotNumber'), 'memory_size': int(content.get('memorySize')) * units.Mi } controller_list.append(controller_result) return controller_list except Exception as err: err_msg = "Failed to get controller 
attributes from Unity: %s" %\ (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) @staticmethod def handle_port_ip(ip, result): if ip is None: ip = result else: ip = '%s;%s' % (ip, result) return ip def get_eth_ports(self): port_list = [] ports = self.rest_handler.get_all_ethports() ip_interfaces = self.rest_handler.get_port_interface() if ports: port_entries = ports.get('entries') for port in port_entries: content = port.get('content') if not content: continue health_value = content.get('health', {}).get('value') if health_value in UnityStorDriver.HEALTH_OK: status = constants.PortHealthStatus.NORMAL else: status = constants.PortHealthStatus.ABNORMAL conn_status = constants.PortConnectionStatus.CONNECTED if \ content.get('isLinkUp') is True \ else constants.PortConnectionStatus.DISCONNECTED ipv4 = None ipv4_mask = None ipv6 = None ipv6_mask = None if ip_interfaces: for ip_info in ip_interfaces.get('entries'): ip_content = ip_info.get('content') if not ip_content: continue if content.get('id') == ip_content.get( 'ipPort').get('id'): if ip_content.get('ipProtocolVersion') == 4: ipv4 = UnityStorDriver.handle_port_ip( ipv4, ip_content.get('ipAddress')) ipv4_mask = UnityStorDriver.handle_port_ip( ipv4_mask, ip_content.get('netmask')) else: ipv6 = UnityStorDriver.handle_port_ip( ipv6, ip_content.get('ipAddress')) ipv6_mask = UnityStorDriver.handle_port_ip( ipv6_mask, ip_content.get('netmask')) port_result = { 'name': content.get('name'), 'storage_id': self.storage_id, 'native_port_id': content.get('id'), 'location': content.get('name'), 'connection_status': conn_status, 'health_status': status, 'type': constants.PortType.ETH, 'logical_type': '', 'speed': int(content.get('speed')) * units.M if content.get('speed') is not None else None, 'max_speed': int(content.get('speed')) * units.M if content.get('speed') is not None else None, 'native_parent_id': content.get('storageProcessor', {}).get('id'), 'wwn': '', 'mac_address': content.get('macAddress'), 'ipv4': ipv4, 'ipv4_mask': ipv4_mask, 'ipv6': ipv6, 'ipv6_mask': ipv6_mask } port_list.append(port_result) return port_list def get_fc_ports(self): port_list = [] ports = self.rest_handler.get_all_fcports() if ports: port_entries = ports.get('entries') for port in port_entries: content = port.get('content') if not content: continue health_value = content.get('health', {}).get('value') connect_value = \ content.get('health', {}).get('descriptionIds', []) if 'ALRT_PORT_LINK_DOWN_NOT_IN_USE' in connect_value: conn_status = constants.PortConnectionStatus.DISCONNECTED elif 'ALRT_PORT_LINK_UP' in connect_value: conn_status = constants.PortConnectionStatus.CONNECTED else: conn_status = constants.PortConnectionStatus.UNKNOWN if health_value in UnityStorDriver.HEALTH_OK: status = constants.PortHealthStatus.NORMAL else: status = constants.PortHealthStatus.ABNORMAL port_result = { 'name': content.get('name'), 'storage_id': self.storage_id, 'native_port_id': content.get('id'), 'location': content.get('name'), 'connection_status': conn_status, 'health_status': status, 'type': constants.PortType.FC, 'logical_type': '', 'speed': int(content.get('currentSpeed')) * units.G if content.get('currentSpeed') is not None else None, 'max_speed': int(content.get('currentSpeed')) * units.G if content.get('currentSpeed') is not None else None, 'native_parent_id': content.get('storageProcessor', {}).get('id'), 'wwn': content.get('wwn') } port_list.append(port_result) return port_list def list_ports(self, context): try: port_list = [] 
port_list.extend(self.get_eth_ports()) port_list.extend(self.get_fc_ports()) return port_list except Exception as err: err_msg = "Failed to get ports attributes from Unity: %s" % \ (six.text_type(err)) raise exception.InvalidResults(err_msg) def list_disks(self, context): try: disks = self.rest_handler.get_all_disks() disk_list = [] if disks and disks.get('entries'): disk_entries = disks.get('entries') for disk in disk_entries: content = disk.get('content') if not content: continue health_value = content.get('health', {}).get('value') slot_info = \ content.get('health', {}).get('descriptionIds', []) if 'ALRT_DISK_SLOT_EMPTY' in slot_info: continue if health_value in UnityStorDriver.HEALTH_OK: status = constants.DiskStatus.NORMAL else: status = constants.DiskStatus.ABNORMAL physical_type = UnityStorDriver.DISK_TYPE_MAP.get( content.get('diskTechnology'), constants.DiskPhysicalType.UNKNOWN) disk_result = { 'name': content.get('name'), 'storage_id': self.storage_id, 'native_disk_id': content.get('id'), 'serial_number': content.get('emcSerialNumber'), 'manufacturer': content.get('manufacturer'), 'model': content.get('model'), 'firmware': content.get('version'), 'speed': int(content.get('rpm')), 'capacity': int(content.get('size')), 'status': status, 'physical_type': physical_type, 'logical_type': '', 'native_disk_group_id': content.get('diskGroup', {}).get('id'), 'location': content.get('name') } disk_list.append(disk_result) else: disk_list = self.get_virtual_disk() return disk_list except Exception as err: err_msg = "Failed to get disk attributes from Unity: %s" % \ (six.text_type(err)) raise exception.InvalidResults(err_msg) def list_filesystems(self, context): try: files = self.rest_handler.get_all_filesystems() if not files: files = self.rest_handler.get_all_filesystems_without_flr() fs_list = [] if files is not None: fs_entries = files.get('entries') for file in fs_entries: content = file.get('content') if not content: continue health_value = content.get('health', {}).get('value') if health_value in UnityStorDriver.HEALTH_OK: status = constants.FilesystemStatus.NORMAL else: status = constants.FilesystemStatus.FAULTY fs_type = constants.VolumeType.THICK if content.get('isThinEnabled') is True: fs_type = constants.VolumeType.THIN worm = UnityStorDriver.FILESYSTEM_FLR_MAP.get( content.get('flrVersion'), constants.WORMType.NON_WORM) security_model = \ UnityStorDriver.FILESYSTEM_SECURITY_MAP.get( content.get('accessPolicy'), constants.NASSecurityMode.NATIVE ) fs = { 'name': content.get('name'), 'storage_id': self.storage_id, 'native_filesystem_id': content.get('id'), 'native_pool_id': content.get('pool', {}).get('id'), 'status': status, 'type': fs_type, 'total_capacity': int(content.get('sizeTotal')), 'used_capacity': int(content.get('sizeUsed')), 'free_capacity': int(content.get('sizeTotal')) - int( content.get('sizeUsed')), 'worm': worm, 'security_mode': security_model } fs_list.append(fs) return fs_list except Exception as err: err_msg = "Failed to get filesystem attributes from Unity: %s"\ % (six.text_type(err)) raise exception.InvalidResults(err_msg) def list_qtrees(self, context): try: qts = self.rest_handler.get_all_qtrees() qt_list = [] if qts is not None: qts_entries = qts.get('entries') for qtree in qts_entries: content = qtree.get('content') if not content: continue qt = { 'name': content.get('path'), 'storage_id': self.storage_id, 'native_qtree_id': content.get('id'), 'native_filesystem_id': content.get('filesystem', {}).get('id'), 'path': content.get('path') } qt_list.append(qt) 
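# Unity treeQuota instances carry no separate display name, so the quota
# path above serves as both the qtree name and its path.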
return qt_list except Exception as err: err_msg = "Failed to get qtree attributes from Unity: %s"\ % (six.text_type(err)) raise exception.InvalidResults(err_msg) def get_share_qtree(self, path, qtree_list): qtree_id = None if not qtree_list: return qtree_id qts_entries = qtree_list.get('entries') for qtree in qts_entries: content = qtree.get('content') if not content: continue if content.get('path') == path: qtree_id = content.get('id') break return qtree_id def get_share(self, protocol, qtree_list, filesystems): try: share_list = [] if protocol == 'cifs': shares = self.rest_handler.get_all_cifsshares() protocol = constants.ShareProtocol.CIFS else: shares = self.rest_handler.get_all_nfsshares() protocol = constants.ShareProtocol.NFS if shares is not None: share_entries = shares.get('entries') for share in share_entries: content = share.get('content') if not content: continue file_name = '' if filesystems: file_entries = filesystems.get('entries') for file in file_entries: file_content = file.get('content') if not file_content: continue if file_content.get('id') == content.get( 'filesystem', {}).get('id'): file_name = file_content.get('name') break path = '/%s%s' % (file_name, content.get('path')) if \ file_name != '' else content.get('path') fs = { 'name': content.get('name'), 'storage_id': self.storage_id, 'native_share_id': content.get('id'), 'native_qtree_id': self.get_share_qtree( content.get('path'), qtree_list), 'native_filesystem_id': content.get('filesystem', {}).get('id'), 'path': path, 'protocol': protocol } share_list.append(fs) return share_list except Exception as err: err_msg = "Failed to get share attributes from Unity: %s"\ % (six.text_type(err)) raise exception.InvalidResults(err_msg) def list_shares(self, context): try: share_list = [] qtrees = self.rest_handler.get_all_qtrees() filesystems = self.rest_handler.get_all_filesystems() if not filesystems: filesystems = \ self.rest_handler.get_all_filesystems_without_flr() share_list.extend(self.get_share('cifs', qtrees, filesystems)) share_list.extend(self.get_share('nfs', qtrees, filesystems)) return share_list except Exception as err: err_msg = "Failed to get shares attributes from Unity: %s"\ % (six.text_type(err)) raise exception.InvalidResults(err_msg) def get_tree_quotas(self): quotas_list = [] qts = self.rest_handler.get_all_qtrees() if qts is None: return quotas_list qt_entries = qts.get('entries') for quota in qt_entries: content = quota.get('content') if not content: continue qt = { "native_quota_id": content.get('id'), "type": constants.QuotaType.TREE, "storage_id": self.storage_id, "native_filesystem_id": content.get('filesystem', {}).get('id'), "native_qtree_id": content.get('id'), "capacity_hard_limit": content.get('hardLimit'), "capacity_soft_limit": content.get('softLimit'), "used_capacity": int(content.get('sizeUsed')) } quotas_list.append(qt) return quotas_list def get_user_quotas(self): quotas_list = [] user_qts = self.rest_handler.get_all_userquotas() if user_qts is None: return quotas_list user_entries = user_qts.get('entries') for user_quota in user_entries: content = user_quota.get('content') if not content: continue qt = { "native_quota_id": content.get('id'), "type": constants.QuotaType.USER, "storage_id": self.storage_id, "native_filesystem_id": content.get('filesystem', {}).get('id'), "native_qtree_id": content.get('treeQuota', {}).get('id'), "capacity_hard_limit": content.get('hardLimit'), "capacity_soft_limit": content.get('softLimit'), "used_capacity": int(content.get('sizeUsed')), 
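# Unity identifies the quota owner only by numeric uid, so the uid is
# exposed as the user/group name: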
"user_group_name": str(content.get('uid')) } quotas_list.append(qt) return quotas_list def list_quotas(self, context): try: quotas_list = [] quotas_list.extend(self.get_tree_quotas()) quotas_list.extend(self.get_user_quotas()) return quotas_list except Exception as err: err_msg = "Failed to get quotas attributes from Unity: %s"\ % (six.text_type(err)) raise exception.InvalidResults(err_msg) def add_trap_config(self, context, trap_config): pass def remove_trap_config(self, context, trap_config): pass @staticmethod def parse_alert(context, alert): return AlertHandler.parse_alert(context, alert) def clear_alert(self, context, alert): return self.rest_handler.remove_alert(alert) @staticmethod def get_access_url(): return 'https://{ip}' def list_storage_host_initiators(self, context): try: initiator_list = [] page = 1 while True: initiators = self.rest_handler.get_host_initiators(page) if not initiators: return initiator_list if 'entries' not in initiators or \ len(initiators['entries']) < 1: break init_entries = initiators.get('entries') for initiator in init_entries: content = initiator.get('content') if not content: continue health_value = content.get('health', {}).get('value') status = UnityStorDriver.INITIATOR_STATUS_MAP.get( health_value, constants.InitiatorStatus.UNKNOWN ) init_result = { "name": content.get('initiatorId'), "storage_id": self.storage_id, "native_storage_host_initiator_id": content.get('id'), "wwn": content.get('initiatorId'), "status": status, "type": UnityStorDriver.INITIATOR_TYPE_MAP.get( content.get('type')), "native_storage_host_id": content.get( 'parentHost', {}).get('id') } initiator_list.append(init_result) page += 1 return initiator_list except Exception as e: LOG.error("Failed to get initiators from unity") raise e def list_storage_hosts(self, context): try: host_list = [] page = 1 while True: hosts = self.rest_handler.get_all_hosts(page) if not hosts: return host_list if 'entries' not in hosts or len(hosts['entries']) < 1: break ips = self.rest_handler.get_host_ip() host_entries = hosts.get('entries') for host in host_entries: host_ip = None content = host.get('content') if not content: continue health_value = content.get('health', {}).get('value') status = UnityStorDriver.HOST_STATUS_MAP.get( health_value, constants.HostStatus.OFFLINE ) if ips: ip_entries = ips.get('entries') for ip in ip_entries: ip_content = ip.get('content') if not ip_content: continue if ip_content.get('host', {}).get('id') \ == content.get('id'): host_ip = ip_content.get('address') break if content.get('osType'): if 'VMware ESXi' in content.get('osType'): os_type = constants.HostOSTypes.VMWARE_ESX else: os_type = UnityStorDriver.OS_TYPE_MAP.get( content.get('osType'), constants.HostOSTypes.UNKNOWN) else: os_type = None host_result = { "name": content.get('name'), "description": content.get('description'), "storage_id": self.storage_id, "native_storage_host_id": content.get('id'), "os_type": os_type, "status": status, "ip_address": host_ip } host_list.append(host_result) page += 1 return host_list except Exception as e: LOG.error("Failed to get host metrics from unity") raise e def list_masking_views(self, context): try: view_list = [] page = 1 while True: views = self.rest_handler.get_host_lun(page) if not views: return view_list if 'entries' not in views or len(views['entries']) < 1: break view_entries = views.get('entries') for view in view_entries: content = view.get('content') view_result = { "name": content.get('id'), "native_storage_host_id": content.get('host', {}).get('id'), 
"storage_id": self.storage_id, "native_volume_id": content.get('lun', {}).get('id'), "native_masking_view_id": content.get('id'), } view_list.append(view_result) page += 1 return view_list except Exception as e: LOG.error("Failed to get view metrics from unity") raise e def get_metrics_loop(self, target, start_time, end_time, metrics, path): page = 1 bend = False time_map = {'latest_time': 0} if not path: return while True: if bend is True: break results = self.rest_handler.get_history_metrics(path, page) if not results: break if 'entries' not in results: break if len(results['entries']) < 1: break bend = UnityStorDriver.get_metric_value( target, start_time, end_time, metrics, results, time_map) page += 1 def get_history_metrics(self, resource_type, targets, start_time, end_time): metrics = [] for target in targets: path = None if resource_type == constants.ResourceType.VOLUME: path = self.VOLUME_PERF_METRICS.get(target) elif resource_type == constants.ResourceType.DISK: path = self.DISK_PERF_METRICS.get(target) elif resource_type == constants.ResourceType.FILESYSTEM: path = self.FILESYSTEM_PERF_METRICS.get(target) elif resource_type == constants.ResourceType.PORT: self.get_metrics_loop(target, start_time, end_time, metrics, self.ETHERNET_PORT_METRICS.get(target)) self.get_metrics_loop(target, start_time, end_time, metrics, self.FC_PORT_METRICS.get(target)) continue if path: self.get_metrics_loop(target, start_time, end_time, metrics, path) return metrics @staticmethod def get_metric_value(target, start_time, end_time, metrics, results, time_map): try: if results is None: return True entries = results.get('entries') for entry in entries: content = entry.get('content') if not content or not content.get('values'): continue occur_time = int(time.mktime(time.strptime( content.get('timestamp'), AlertHandler.TIME_PATTERN)) ) * AlertHandler.SECONDS_TO_MS hour_offset = (time.mktime(time.localtime()) - time.mktime( time.gmtime())) / AlertHandler.SECONDS_PER_HOUR occur_time = occur_time + (int(hour_offset) * UnityStorDriver.MS_PER_HOUR) if occur_time < start_time: return True if time_map.get('latest_time') <= occur_time \ and time_map.get('latest_time') != 0: continue time_map['latest_time'] = occur_time if start_time <= occur_time <= end_time: for sp_value in content.get('values'): perf_value = content.get('values').get(sp_value) for key, value in perf_value.items(): bfind = False value = float(value) for metric in metrics: if metric.get('resource_id') == key and \ metric.get('type') == target: if metric.get('values').get( occur_time): if target == 'responseTime': metric.get( 'values')[occur_time] = \ max(value, metric.get( 'values').get( occur_time)) else: metric.get('values')[ occur_time] += value else: metric.get('values')[occur_time] \ = value bfind = True break if bfind is False: metric_value = { 'type': target, 'resource_id': key, 'values': {occur_time: value} } metrics.append(metric_value) except Exception as err: err_msg = "Failed to collect history metrics from Unity: %s, " \ "target:%s" % (six.text_type(err), target) LOG.error(err_msg) return False @staticmethod def count_total_perf(metrics): if metrics is None: return for metric in metrics: write_tye = None total_type = None if UnityStorDriver.PERF_TYPE_MAP.get(metric.get('type')): write_tye = UnityStorDriver.PERF_TYPE_MAP.get( metric.get('type')).get('write') total_type = UnityStorDriver.PERF_TYPE_MAP.get( metric.get('type')).get('total') else: continue for metric_write in metrics: if metric_write.get('resource_id') == \ 
metric.get('resource_id') \ and metric_write.get('type') == write_tye: total = { 'type': total_type, 'resource_id': metric.get('resource_id') } bfind_total = False for tr, read in metric.get('values').items(): for tw, write in metric_write.get( 'values').items(): if tr == tw: value = read + write if total.get('values'): total['values'][tr] = value else: total['values'] = {tr: value} bfind_total = True break if bfind_total: metrics.append(total) break @staticmethod def package_metrics(storage_id, resource_type, metrics, metrics_list): for metric in metrics_list: unit = None if resource_type == constants.ResourceType.PORT: unit = consts.PORT_CAP[metric.get('type')]['unit'] elif resource_type == constants.ResourceType.VOLUME: unit = consts.VOLUME_CAP[metric.get('type')]['unit'] elif resource_type == constants.ResourceType.DISK: unit = consts.DISK_CAP[metric.get('type')]['unit'] elif resource_type == constants.ResourceType.FILESYSTEM: unit = consts.FILESYSTEM_CAP[metric.get('type')]['unit'] labels = { 'storage_id': storage_id, 'resource_type': resource_type, 'resource_id': metric.get('resource_id'), 'type': 'RAW', 'unit': unit } if 'THROUGHPUT' in metric.get('type').upper() or \ 'RESPONSETIME' in metric.get('type').upper(): for tm in metric.get('values'): metric['values'][tm] = metric['values'][tm] / units.k value = constants.metric_struct(name=metric.get('type'), labels=labels, values=metric.get('values')) metrics.append(value) def collect_perf_metrics(self, context, storage_id, resource_metrics, start_time, end_time): metrics = [] try: if resource_metrics.get(constants.ResourceType.VOLUME): volume_metrics = self.get_history_metrics( constants.ResourceType.VOLUME, resource_metrics.get(constants.ResourceType.VOLUME), start_time, end_time) UnityStorDriver.count_total_perf(volume_metrics) UnityStorDriver.package_metrics(storage_id, constants.ResourceType.VOLUME, metrics, volume_metrics) if resource_metrics.get(constants.ResourceType.DISK): disk_metrics = self.get_history_metrics( constants.ResourceType.DISK, resource_metrics.get(constants.ResourceType.DISK), start_time, end_time) UnityStorDriver.count_total_perf(disk_metrics) UnityStorDriver.package_metrics(storage_id, constants.ResourceType.DISK, metrics, disk_metrics) if resource_metrics.get(constants.ResourceType.PORT): port_metrics = self.get_history_metrics( constants.ResourceType.PORT, resource_metrics.get(constants.ResourceType.PORT), start_time, end_time) UnityStorDriver.count_total_perf(port_metrics) UnityStorDriver.package_metrics(storage_id, constants.ResourceType.PORT, metrics, port_metrics) if resource_metrics.get(constants.ResourceType.FILESYSTEM): file_metrics = self.get_history_metrics( constants.ResourceType.FILESYSTEM, resource_metrics.get(constants.ResourceType.FILESYSTEM), start_time, end_time) UnityStorDriver.count_total_perf(file_metrics) UnityStorDriver.package_metrics( storage_id, constants.ResourceType.FILESYSTEM, metrics, file_metrics) except Exception as err: err_msg = "Failed to collect metrics from Unity: %s" % \ (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return metrics @staticmethod def get_capabilities(context, filters=None): """Get capability of supported driver""" return { 'is_historic': True, 'resource_metrics': { constants.ResourceType.VOLUME: consts.VOLUME_CAP, constants.ResourceType.PORT: consts.PORT_CAP, constants.ResourceType.DISK: consts.DISK_CAP, constants.ResourceType.FILESYSTEM: consts.FILESYSTEM_CAP } } def get_latest_perf_timestamp(self, context): latest_time = 0 
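        # Probe the first page of LUN read-IOPS history (falling back to
        # Ethernet port history) and return the newest sample timestamp,
        # shifted from UTC to local time, in milliseconds.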
        page = 1
        results = self.rest_handler.get_history_metrics(
            UnityStorDriver.VOLUME_PERF_METRICS.get('readIops'), page)
        if not results:
            results = self.rest_handler.get_history_metrics(
                UnityStorDriver.ETHERNET_PORT_METRICS.get('readIops'), page)
        if results:
            if 'entries' in results:
                entries = results.get('entries')
                for entry in entries:
                    content = entry.get('content')
                    if not content:
                        continue
                    occur_time = int(time.mktime(time.strptime(
                        content.get('timestamp'),
                        AlertHandler.TIME_PATTERN))
                    ) * AlertHandler.SECONDS_TO_MS
                    hour_offset = \
                        (time.mktime(time.localtime()) -
                         time.mktime(time.gmtime())) \
                        / AlertHandler.SECONDS_PER_HOUR
                    occur_time = occur_time + (int(hour_offset) *
                                               UnityStorDriver.MS_PER_HOUR)
                    latest_time = occur_time
                    break
        return latest_time

    def get_virtual_disk(self):
        try:
            disks = self.rest_handler.get_virtual_disks()
            disk_list = []
            if disks is not None:
                disk_entries = disks.get('entries')
                for disk in disk_entries:
                    content = disk.get('content')
                    if not content:
                        continue
                    health_value = content.get('health', {}).get('value')
                    slot_info = \
                        content.get('health', {}).get('descriptionIds', [])
                    if 'ALRT_DISK_SLOT_EMPTY' in slot_info:
                        continue
                    if health_value in UnityStorDriver.HEALTH_OK:
                        status = constants.DiskStatus.NORMAL
                    else:
                        status = constants.DiskStatus.ABNORMAL
                    disk_result = {
                        'name': content.get('name'),
                        'storage_id': self.storage_id,
                        'native_disk_id': content.get('id'),
                        'capacity': int(content.get('sizeTotal')),
                        'status': status,
                        'manufacturer': content.get('manufacturer'),
                        'model': content.get('model'),
                        'physical_type': constants.DiskPhysicalType.VMDISK
                    }
                    disk_list.append(disk_result)
            return disk_list
        except Exception as err:
            err_msg = "Failed to get virtual disk from Unity: %s" % \
                      (six.text_type(err))
            raise exception.InvalidResults(err_msg)


================================================
FILE: delfin/drivers/dell_emc/vmax/__init__.py
================================================


================================================
FILE: delfin/drivers/dell_emc/vmax/alert_handler/__init__.py
================================================


================================================
FILE: delfin/drivers/dell_emc/vmax/alert_handler/alert_mapper.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
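# Usage sketch: these tables translate numeric codes from VMAX traps into
# readable names (values taken from the mappings below):
#
#     >>> component_type_mapping.get('1027', '')
#     'Physical Disk'
#     >>> alarm_id_name_mapping.get('2', '2')
#     'SYMAPI_AEVENT2_UID_EVT_EVENTS_LOST'
#
# Unmapped alarm ids fall back to the raw id string, which is how
# snmp_alerts.AlertHandler consumes alarm_id_name_mapping.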
# This file contains mapping of alert information to vmax specific description

# component type to descriptive info
component_type_mapping = {
    '1024': 'Symmetrix',
    '1025': 'Service Processor',
    '1026': 'Device',
    '1027': 'Physical Disk',
    '1028': 'Director',
    '1029': 'Port',
    '1030': 'SRDF sub-system',
    '1031': 'SRDF group',
    '1032': 'Snap Save Device Pool',
    '1033': 'Cache / Memory',
    '1034': 'Power or Battery subsystem',
    '1035': 'Environmental (e.g.: Temperature, Smoke)',
    '1036': 'Diagnostics',
    '1037': 'Communications sub-system',
    '1038': 'External Lock',
    '1039': 'Fan',
    '1040': 'Link Controller Card',
    '1041': 'Enclosure, Enclosure-Slot or MIBE',
    '1042': 'SRDF/A DSE Device Pool',
    '1043': 'Thin Device Data Pool',
    '1044': 'Solutions Enabler DG group',
    '1045': 'Solutions Enabler CG group',
    '1046': 'Management Module',
    '1047': 'IO Module Carrier',
    '1048': 'Director - Environmental',
    '1049': 'Storage Group',
    '1050': 'Migration Session',
    '1051': 'Symmetrix Disk Group'
}

# Alarm id to alarm name mapping
# Currently this contains a limited list, to be extended
alarm_id_name_mapping = {
    '1': 'SYMAPI_AEVENT2_UID_EVT_RESTARTED',
    '2': 'SYMAPI_AEVENT2_UID_EVT_EVENTS_LOST',
    '3': 'SYMAPI_AEVENT2_UID_EVT_EVENTS_OVERFLOW',
    '1050': 'SYMAPI_AEVENT2_UID_MOD_DIAG_TRACE_TRIG',
    '1051': 'SYMAPI_AEVENT2_UID_MOD_DIAG_TRACE_TRIG_REM'
}


================================================
FILE: delfin/drivers/dell_emc/vmax/alert_handler/oid_mapper.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class OidMapper(object):
    """Functions/attributes for oid to alert info mapper"""

    # Map to translate trap oid strings to oid names
    OID_MAP = {"1.3.6.1.3.94.1.11.1.3": "connUnitEventId",
               "1.3.6.1.3.94.1.11.1.6": "connUnitEventSeverity",
               "1.3.6.1.3.94.1.11.1.7": "connUnitEventType",
               "1.3.6.1.3.94.1.11.1.8": "connUnitEventObject",
               "1.3.6.1.3.94.1.11.1.9": "connUnitEventDescr",
               "1.3.6.1.3.94.1.6.1.20": "connUnitName",
               "1.3.6.1.3.94.1.6.1.3": "connUnitType",
               "1.3.6.1.4.1.1139.3.8888.1.0": "emcAsyncEventSource",
               "1.3.6.1.4.1.1139.3.8888.2.0": "emcAsyncEventCode",
               "1.3.6.1.4.1.1139.3.8888.3.0": "emcAsyncEventComponentType",
               "1.3.6.1.4.1.1139.3.8888.4.0": "emcAsyncEventComponentName"}

    def __init__(self):
        pass

    @staticmethod
    def map_oids(alert):
        """Translate oids using static map."""
        alert_model = dict()
        for attr in alert:
            # Remove the instance number at the end of oid before mapping
            oid_str = attr.rsplit('.', 1)[0]
            key = OidMapper.OID_MAP.get(oid_str, None)
            alert_model[key] = alert[attr]
        return alert_model


================================================
FILE: delfin/drivers/dell_emc/vmax/alert_handler/snmp_alerts.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
from time import gmtime, strftime

from oslo_log import log

from delfin import exception
from delfin.common import constants
from delfin.drivers.dell_emc.vmax.alert_handler import alert_mapper
from delfin.drivers.dell_emc.vmax.alert_handler import oid_mapper

LOG = log.getLogger(__name__)


class AlertHandler(object):
    """Alert handling functions for vmax snmp traps"""

    # Translation of trap severity to alert model severity
    # Values are:
    # unknown 1, emergency 2, alert 3, critical 4, error 5,
    # warning 6, notify 7, info 8, debug 9, mark 10
    SEVERITY_MAP = {"2": constants.Severity.FATAL,
                    "3": constants.Severity.CRITICAL,
                    "4": constants.Severity.CRITICAL,
                    "5": constants.Severity.MAJOR,
                    "6": constants.Severity.WARNING,
                    "7": constants.Severity.WARNING,
                    "8": constants.Severity.INFORMATIONAL,
                    "9": constants.Severity.INFORMATIONAL,
                    "10": constants.Severity.INFORMATIONAL}

    # Attributes mandatory in alert info to proceed with model filling
    _mandatory_alert_attributes = ('emcAsyncEventCode',
                                   'connUnitEventSeverity',
                                   'connUnitEventType',
                                   'connUnitEventDescr',
                                   'connUnitType',
                                   'emcAsyncEventComponentType',
                                   'emcAsyncEventComponentName',
                                   'emcAsyncEventSource')

    @staticmethod
    def parse_alert(context, alert):
        """Parse alert data received from alert manager and fill the model."""
        alert = oid_mapper.OidMapper.map_oids(alert)

        # Check for mandatory alert attributes
        for attr in AlertHandler._mandatory_alert_attributes:
            if not alert.get(attr):
                msg = "Mandatory information %s missing in alert message. " \
                      % attr
                raise exception.InvalidInput(msg)

        alert_model = {}
        # Fill alarm id and fill alarm_name with corresponding mapping names
        alert_model['alert_id'] = alert['emcAsyncEventCode']
        alert_model['alert_name'] = alert_mapper.alarm_id_name_mapping.get(
            alert_model['alert_id'], alert_model['alert_id'])
        alert_model['severity'] = AlertHandler.SEVERITY_MAP.get(
            alert['connUnitEventSeverity'],
            constants.Severity.INFORMATIONAL)
        alert_model['category'] = constants.Category.NOT_SPECIFIED
        alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
        alert_model['sequence_number'] = alert['connUnitEventId']

        # trap info does not contain occur time, update with received time
        # Get date and time and convert to epoch format
        pattern = '%Y-%m-%d %H:%M:%S'
        curr_time = strftime(pattern, gmtime())
        alert_model['occur_time'] = int(time.mktime(time.strptime(curr_time,
                                                                  pattern)))
        alert_model['description'] = alert['connUnitEventDescr']
        alert_model['recovery_advice'] = 'None'
        alert_model['resource_type'] = alert['connUnitType']

        # Location is name-value pair having component type and component name
        component_type = alert_mapper.component_type_mapping.get(
            alert.get('emcAsyncEventComponentType'), "")
        alert_model['location'] = 'Array id=' \
                                  + alert['connUnitName'] \
                                  + ',Component type=' \
                                  + component_type \
                                  + ',Component name=' \
                                  + alert['emcAsyncEventComponentName'] \
                                  + ',Event source=' \
                                  + alert['emcAsyncEventSource']

        if alert['connUnitName']:
            alert_model['serial_number'] = alert['connUnitName']

        return alert_model

    def add_trap_config(self, context, storage_id, trap_config):
        """Config the trap receiver in storage system."""
        pass

    def remove_trap_config(self, context, storage_id, trap_config):
        """Remove trap receiver configuration from storage system."""
        pass

    def clear_alert(self, context, storage_id, alert):
        """Clear alert from storage system."""
        pass


================================================
FILE: delfin/drivers/dell_emc/vmax/alert_handler/unisphere_alerts.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log

from delfin import exception
from delfin.common import constants

LOG = log.getLogger(__name__)


class AlertHandler(object):
    """Alert handling functions for unisphere alerts"""

    # Alert Id and name are not part of queried alerts
    DEFAULT_UNISPHERE_ALERT_NAME = "Unisphere alert about vmax"
    DEFAULT_UNISPHERE_ALERT_ID = 0xFFFFFFFF

    # Translation of queried alert severity to alert model severity
    SEVERITY_MAP = {"FATAL": constants.Severity.FATAL,
                    "CRITICAL": constants.Severity.CRITICAL,
                    "WARNING": constants.Severity.WARNING,
                    "NORMAL": constants.Severity.INFORMATIONAL,
                    "INFORMATION": constants.Severity.INFORMATIONAL}

    def __init__(self):
        pass

    def parse_queried_alerts(self, alert_list):
        """Parse queried alerts and convert to alert model."""
        alert_model_list = []
        for alert in alert_list:
            try:
                alert_model = dict()
                alert_model['alert_id'] = self.DEFAULT_UNISPHERE_ALERT_ID
                alert_model['alert_name'] = self.DEFAULT_UNISPHERE_ALERT_NAME
                alert_model['severity'] = self.SEVERITY_MAP.get(
                    alert['severity'], constants.Severity.NOT_SPECIFIED)

                # category and type are not part of queried alerts
                alert_model['category'] = constants.Category.FAULT
                alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
                alert_model['sequence_number'] = alert['alertId']
                alert_model['occur_time'] = alert['created_date_milliseconds']
                alert_model['description'] = alert['description']
                alert_model['recovery_advice'] = 'None'
                alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE

                # Location is name-value pair
                alert_model['location'] = 'type=' + alert['type']

                alert_model_list.append(alert_model)
            except Exception as e:
                LOG.error(e)
                msg = ("Failed to build alert model as some attributes "
                       "missing in alert message")
                raise exception.InvalidResults(msg)
        return alert_model_list

    def add_trap_config(self, context, storage_id, trap_config):
        """Config the trap receiver in storage system."""
        pass

    def remove_trap_config(self, context, storage_id, trap_config):
        """Remove trap receiver configuration from storage system."""
        pass

    def clear_alert(self, context, storage_id, alert):
        """Clear alert from storage system."""
        pass


================================================
FILE: delfin/drivers/dell_emc/vmax/client.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
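# Construction sketch (hypothetical kwargs; the exact credential fields are
# whatever rest.VMaxRest.set_rest_credentials expects):
#
#     client = VMAXClient(rest=rest_access, verify=False)
#     client.init_connection(access_info)   # resolves the Unisphere version
#     client.add_storage(access_info)       # resolves and validates array id
#
# add_storage() may omit 'storage_name' only when the (embedded) Unisphere
# manages exactly one array; otherwise it must match a known symmetrixId.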
from oslo_log import log
from oslo_utils import units

from delfin import exception
from delfin.common import constants
from delfin.drivers.dell_emc.vmax import constants as consts
from delfin.drivers.dell_emc.vmax import rest, perf_utils

LOG = log.getLogger(__name__)

EMBEDDED_UNISPHERE_ARRAY_COUNT = 1


class VMAXClient(object):
    """ Client class for communicating with VMAX storage """

    def __init__(self, **kwargs):
        self.uni_version = None
        self.array_id = {}
        rest_access = kwargs.get('rest')
        if rest_access is None:
            raise exception.InvalidInput('Input rest_access is missing')
        self.rest = rest.VMaxRest()
        self.rest.set_rest_credentials(rest_access)
        self.reset_connection(**kwargs)

    def reset_connection(self, **kwargs):
        """ Reset connection to VMAX storage with new configs """
        self.rest.verify = kwargs.get('verify', False)
        self.rest.establish_rest_session()

    def init_connection(self, access_info):
        """ Given the access_info get a connection to VMAX storage """
        try:
            ver, self.uni_version = self.rest.get_uni_version()
            LOG.info('Connected to Unisphere Version: {0}'.format(ver))
        except exception.InvalidUsernameOrPassword as e:
            msg = "Failed to connect to VMAX. Reason: {}".format(e.msg)
            LOG.error(msg)
            raise e
        except (exception.SSLCertificateFailed,
                exception.SSLHandshakeFailed) as e:
            msg = ("Failed to connect to VMAX: {}".format(e))
            LOG.error(msg)
            raise
        except Exception as err:
            msg = ("Failed to connect to VMAX. Host or Port is not correct: "
                   "{}".format(err))
            LOG.error(msg)
            raise exception.InvalidIpOrPort()

        if not self.uni_version:
            msg = "Invalid input. Failed to get vmax unisphere version"
            raise exception.InvalidInput(msg)

    def add_storage(self, access_info):
        storage_name = access_info.get('storage_name')
        try:
            # Get array details from unisphere
            array = self.rest.get_array_detail(version=self.uni_version)
            if not array:
                msg = "Failed to get array details"
                raise exception.InvalidInput(msg)

            if len(array['symmetrixId']) == EMBEDDED_UNISPHERE_ARRAY_COUNT:
                if not storage_name:
                    storage_name = array['symmetrixId'][0]
                elif storage_name != array['symmetrixId'][0]:
                    msg = "Invalid storage_name. Expected: {}". \
                        format(array['symmetrixId'])
                    raise exception.InvalidInput(msg)
            else:
                if not storage_name:
                    msg = "Input storage_name is missing. Supported ids: {}". \
                        format(array['symmetrixId'])
                    raise exception.InvalidInput(msg)

                array_ids = array.get('symmetrixId', list())
                if storage_name not in array_ids:
                    msg = "Failed to get VMAX array id from Unisphere"
                    raise exception.InvalidInput(msg)

            self.array_id[access_info['storage_id']] = storage_name
        except Exception:
            LOG.error("Failed to add storage from VMAX")
            raise

    def get_array_details(self, storage_id):
        try:
            array_id = self.array_id.get(storage_id)
            # Get the VMAX array properties
            return self.rest.get_vmax_array_details(version=self.uni_version,
                                                    array=array_id)
        except Exception:
            LOG.error("Failed to get array details from VMAX")
            raise

    def get_storage_capacity(self, storage_id):
        try:
            storage_info = self.rest.get_system_capacity(
                self.array_id[storage_id], self.uni_version)
            total_capacity = 0
            used_capacity = 0
            free_capacity = 0
            raw_capacity = 0
            subscribed_capacity = 0
            if int(self.uni_version) < 90:
                physical_capacity = storage_info.get('physicalCapacity')
                total_cap = storage_info.get('total_usable_cap_gb')
                used_cap = storage_info.get('total_allocated_cap_gb')
                subscribed_cap = storage_info.get('total_subscribed_cap_gb')
                total_raw = physical_capacity.get('total_capacity_gb')
                free_cap = total_cap - used_cap

                total_capacity = int(total_cap * units.Gi)
                used_capacity = int(used_cap * units.Gi)
                free_capacity = int(free_cap * units.Gi)
                raw_capacity = int(total_raw * units.Gi)
                subscribed_capacity = int(subscribed_cap * units.Gi)
            else:
                system_capacity = storage_info['system_capacity']
                physical_capacity = storage_info.get('physicalCapacity')
                total_cap = system_capacity.get('usable_total_tb')
                used_cap = system_capacity.get('usable_used_tb')
                subscribed_cap = system_capacity.get('subscribed_total_tb')
                total_raw = physical_capacity.get('total_capacity_gb')
                free_cap = total_cap - used_cap

                total_capacity = int(total_cap * units.Ti)
                used_capacity = int(used_cap * units.Ti)
                free_capacity = int(free_cap * units.Ti)
                raw_capacity = int(total_raw * units.Gi)
                subscribed_capacity = int(subscribed_cap * units.Ti)

            return total_capacity, used_capacity, free_capacity,\
                raw_capacity, subscribed_capacity
        except Exception:
            LOG.error("Failed to get capacity from VMAX")
            raise

    def list_storage_pools(self, storage_id):
        try:
            # Get list of SRP pool names
            pools = self.rest.get_srp_by_name(
                self.array_id[storage_id], self.uni_version, srp='')['srpId']

            pool_list = []
            for pool in pools:
                pool_info = self.rest.get_srp_by_name(
                    self.array_id[storage_id], self.uni_version, srp=pool)

                total_cap = 0
                used_cap = 0
                subscribed_cap = 0
                if int(self.uni_version) < 90:
                    total_cap = pool_info['total_usable_cap_gb'] * units.Gi
                    used_cap = pool_info['total_allocated_cap_gb'] * units.Gi
                    subscribed_cap = \
                        pool_info['total_subscribed_cap_gb'] * units.Gi
                else:
                    srp_cap = pool_info['srp_capacity']
                    total_cap = srp_cap['usable_total_tb'] * units.Ti
                    used_cap = srp_cap['usable_used_tb'] * units.Ti
                    subscribed_cap = srp_cap['subscribed_total_tb'] * units.Ti

                p = {
                    "name": pool,
                    "storage_id": storage_id,
                    "native_storage_pool_id": pool_info["srpId"],
                    "description": "Dell EMC VMAX Pool",
                    "status": constants.StoragePoolStatus.NORMAL,
                    "storage_type": constants.StorageType.BLOCK,
                    "total_capacity": int(total_cap),
                    "used_capacity": int(used_cap),
                    "free_capacity": int(total_cap - used_cap),
                    "subscribed_capacity": int(subscribed_cap),
                }
                pool_list.append(p)

            return pool_list
        except Exception:
            LOG.error("Failed to get pool metrics from VMAX")
            raise

    def list_volumes(self, storage_id):
        try:
            # Get default SRPs assigned for the array
            default_srps = self.rest.get_default_srps(
                self.array_id[storage_id], version=self.uni_version)
            # List all volumes except data volumes
            volumes = self.rest.get_volume_list(
                self.array_id[storage_id], version=self.uni_version,
                params={'data_volume': 'false'})

            # TODO: Update constants.VolumeStatus to make mapping more precise
            switcher = {
                'Ready': constants.VolumeStatus.AVAILABLE,
                'Not Ready': constants.VolumeStatus.AVAILABLE,
                'Mixed': constants.VolumeStatus.AVAILABLE,
                'Write Disabled': constants.VolumeStatus.AVAILABLE,
                'N/A': constants.VolumeStatus.ERROR,
            }

            volume_list = []
            for volume in volumes:
                # Get volume details
                vol = self.rest.get_volume(self.array_id[storage_id],
                                           self.uni_version, volume)

                emulation_type = vol['emulation']
                total_cap = vol['cap_mb'] * units.Mi
                used_cap = (total_cap * vol['allocated_percent']) / 100.0
                free_cap = total_cap - used_cap

                status = switcher.get(vol.get('status'),
                                      constants.VolumeStatus.AVAILABLE)

                description = "Dell EMC VMAX volume"
                if vol['type'] == 'TDEV':
                    description = "Dell EMC VMAX 'thin device' volume"

                name = volume
                if vol.get('volume_identifier'):
                    name = volume + ':' + vol['volume_identifier']

                v = {
                    "name": name,
                    "storage_id": storage_id,
                    "description": description,
                    "status": status,
                    "native_volume_id": vol['volumeId'],
                    "wwn": vol['wwn'],
                    "type": constants.VolumeType.THIN,
                    "total_capacity": int(total_cap),
                    "used_capacity": int(used_cap),
                    "free_capacity": int(free_cap),
                }

                if vol['num_of_storage_groups'] == 1:
                    sg = vol['storageGroupId'][0]
                    sg_info = self.rest.get_storage_group(
                        self.array_id[storage_id], self.uni_version, sg)
                    v['native_storage_pool_id'] = \
                        sg_info.get('srp', default_srps[emulation_type])
                    v['compressed'] = sg_info.get('compression', False)
                else:
                    v['native_storage_pool_id'] = default_srps[emulation_type]

                volume_list.append(v)

            return volume_list
        except Exception:
            LOG.error("Failed to list volumes from VMAX")
            raise

    def list_controllers(self, storage_id):
        try:
            # Get list of Directors
            directors = self.rest.get_director_list(self.array_id[storage_id],
                                                    self.uni_version)

            controller_list = []
            for director in directors:
                director_info = self.rest.get_director(
                    self.array_id[storage_id], self.uni_version, director)
                status = constants.ControllerStatus.NORMAL
                if "OFF" in director_info.get('availability', '').upper():
                    status = constants.ControllerStatus.OFFLINE

                controller = {
                    'name': director_info['directorId'],
                    'storage_id': storage_id,
                    'native_controller_id': director_info['directorId'],
                    'status': status,
                    'location': 'slot_' + str(
                        director_info.get('director_slot_number')),
                    'soft_version': None,
                    'cpu_info': 'Cores-' + str(
                        director_info.get('num_of_cores')),
                    'memory_size': None
                }
                controller_list.append(controller)

            return controller_list
        except Exception:
            LOG.error("Failed to get controller metrics from VMAX")
            raise

    def list_ports(self, storage_id):
        try:
            # Get list of Directors
            directors = self.rest.get_director_list(self.array_id[storage_id],
                                                    self.uni_version)
        except Exception:
            LOG.error("Failed to get director list,"
                      " while getting port metrics from VMAX")
            raise

        switcher = {
            'A': constants.PortLogicalType.MANAGEMENT,
            'B': constants.PortLogicalType.SERVICE,
            'C': constants.PortLogicalType.BACKEND,
        }

        port_list = []
        for director in directors:
            try:
                port_keys = self.rest.get_port_list(
                    self.array_id[storage_id], self.uni_version, director)

                for port_key in port_keys:
                    port_info = self.rest.get_port(
                        self.array_id[storage_id], self.uni_version,
                        director, port_key['portId'])['symmetrixPort']

                    connection_status = \
                        constants.PortConnectionStatus.CONNECTED
                    if port_info.get('port_status',
                                     '').upper().find('OFF') != -1:
                        connection_status = \
                            constants.PortConnectionStatus.DISCONNECTED

                    port_type = constants.PortType.OTHER
                    if port_info.get('type', '').upper().find('FIBRE') != -1:
                        port_type = constants.PortType.FC
                    if port_info.get('type', '').upper().find('ETH') != -1:
                        port_type = constants.PortType.ETH

                    name = "{0}:{1}".format(port_key['directorId'],
                                            port_key['portId'])
                    director_emulation = port_key['directorId'][4]
                    logical_type = switcher.get(
                        director_emulation, constants.PortLogicalType.OTHER)
                    if logical_type == constants.PortLogicalType.OTHER:
                        port_prefix = port_key['directorId'][:2]
                        if port_prefix in ['FA', 'FE', 'EA', 'EF', 'SE']:
                            logical_type = constants.PortLogicalType.FRONTEND
                        if port_prefix in ['DA', 'DF', 'DX']:
                            logical_type = constants.PortLogicalType.BACKEND

                    speed = int(port_info.get('negotiated_speed',
                                              '0')) * units.Gi
                    max_speed = int(port_info.get('max_speed',
                                                  '0')) * units.Gi

                    port_dict = {
                        'name': name,
                        'storage_id': storage_id,
                        'native_port_id': port_key['portId'],
                        'location': 'director_' + port_key['directorId'],
                        'connection_status': connection_status,
                        'health_status': constants.PortHealthStatus.NORMAL,
                        'type': port_type,
                        'logical_type': logical_type,
                        'speed': speed,
                        'max_speed': max_speed,
                        'native_parent_id': port_key['directorId'],
                        'wwn': port_info.get('identifier', None),
                        'mac_address': None,
                        'ipv4': port_info.get('ipv4_address'),
                        'ipv4_mask': port_info.get('ipv4_netmask'),
                        'ipv6': port_info.get('ipv6_address'),
                        'ipv6_mask': None,
                    }
                    port_list.append(port_dict)
            except Exception:
                LOG.error("Failed to get port list for director: {}"
                          .format(director))

        return port_list

    def list_disks(self, storage_id):
        if int(self.uni_version) < 91:
            return []
        try:
            # Get list of Disks
            disks = self.rest.get_disk_list(self.array_id[storage_id],
                                            self.uni_version)

            disk_list = []
            for disk in disks:
                disk_info = self.rest.get_disk(
                    self.array_id[storage_id], self.uni_version, disk)
                disk_item = {
                    'name': disk,
                    'storage_id': storage_id,
                    'native_disk_id': disk,
                    'manufacturer': disk_info['vendor'],
                    'capacity': int(disk_info['capacity']) * units.Gi,
                }
                disk_list.append(disk_item)
            return disk_list
        except Exception:
            LOG.error("Failed to get disk details from VMAX")
            raise

    def list_storage_host_initiators(self, storage_id):
        try:
            # Get list of initiators
            initiators = self.rest.get_initiator_list(
                self.array_id[storage_id], self.uni_version)

            initiator_list = []
            for initiator in initiators:
                initiator_info = self.rest.get_initiator(
                    self.array_id[storage_id], self.uni_version, initiator)

                type_string = initiator_info.get('type', '').upper()
                initiator_type = constants.InitiatorType.UNKNOWN
                if 'FIBRE' in type_string:
                    initiator_type = constants.InitiatorType.FC
                if 'ISCSI' in type_string:
                    initiator_type = constants.InitiatorType.ISCSI

                initiator_status = constants.InitiatorStatus.ONLINE
                if not initiator_info.get('on_fabric', False):
                    initiator_status = constants.InitiatorStatus.OFFLINE

                initiator_item = {
                    'name': initiator,
                    'storage_id': storage_id,
                    'native_storage_host_initiator_id': initiator,
                    'alias': initiator_info.get('alias'),
                    'wwn': initiator_info.get('initiatorId'),
                    'type': initiator_type,
                    'status': initiator_status,
                    'native_storage_host_id': initiator_info.get('host'),
                }
                initiator_list.append(initiator_item)
            return initiator_list
        except Exception:
            LOG.error("Failed to get host initiator details from VMAX")
            raise

    def list_storage_hosts(self, storage_id):
        try:
            # Get list of storage hosts
            hosts = self.rest.get_host_list(self.array_id[storage_id],
                                            self.uni_version)

            host_list = []
            for host in hosts:
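                # Illustrative note (assumption, not from the source): a
                # host object returned by Unisphere is expected to look
                # roughly like {"hostId": "HostA", "num_of_initiators": 2};
                # only 'hostId' is consumed below, serving as both the
                # native id and the reported host name.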
host_info = self.rest.get_host( self.array_id[storage_id], self.uni_version, host) host_item = { 'storage_id': storage_id, 'native_storage_host_id': host_info.get('hostId'), 'name': host_info.get('hostId'), 'os_type': constants.HostOSTypes.UNKNOWN, 'status': constants.HostStatus.NORMAL, } host_list.append(host_item) return host_list except Exception: LOG.error("Failed to get storage host details from VMAX") raise def list_storage_host_groups(self, storage_id): try: # Get list of storage host groups host_groups = self.rest.get_host_group_list( self.array_id[storage_id], self.uni_version) host_group_list = [] storage_host_grp_relation_list = [] for host_group in host_groups: host_group_info = self.rest.get_host_group( self.array_id[storage_id], self.uni_version, host_group) host_group_item = { 'name': host_group, 'storage_id': storage_id, 'native_storage_host_group_id': host_group, } host_group_list.append(host_group_item) for storage_host in host_group_info['host']: storage_host_group_relation = { 'storage_id': storage_id, 'native_storage_host_group_id': host_group, 'native_storage_host_id': storage_host.get('hostId') } storage_host_grp_relation_list \ .append(storage_host_group_relation) result = { 'storage_host_groups': host_group_list, 'storage_host_grp_host_rels': storage_host_grp_relation_list } return result except Exception: LOG.error("Failed to get storage host group details from VMAX") raise def list_port_groups(self, storage_id): try: # Get list of port groups port_groups = self.rest.get_port_group_list( self.array_id[storage_id], self.uni_version) port_group_list = [] port_group_relation_list = [] for port_group in port_groups: port_group_info = self.rest.get_port_group( self.array_id[storage_id], self.uni_version, port_group) port_group_item = { 'name': port_group, 'storage_id': storage_id, 'native_port_group_id': port_group, } port_group_list.append(port_group_item) for port in port_group_info['symmetrixPortKey']: port_name = port['directorId'] + ':' + port['portId'] port_group_relation = { 'storage_id': storage_id, 'native_port_group_id': port_group, 'native_port_id': port_name } port_group_relation_list.append(port_group_relation) result = { 'port_groups': port_group_list, 'port_grp_port_rels': port_group_relation_list } return result except Exception: LOG.error("Failed to get port group details from VMAX") raise def list_volume_groups(self, storage_id): try: # Get list of volume groups volume_groups = self.rest.get_volume_group_list( self.array_id[storage_id], self.uni_version) volume_group_list = [] volume_group_relation_list = [] for volume_group in volume_groups: # volume_group_info = self.rest.get_volume_group( # self.array_id, self.uni_version, volume_group) volume_group_item = { 'name': volume_group, 'storage_id': storage_id, 'native_volume_group_id': volume_group, } volume_group_list.append(volume_group_item) # List all volumes except data volumes volumes = self.rest.get_volume_list( self.array_id[storage_id], version=self.uni_version, params={'data_volume': 'false', 'storageGroupId': volume_group}) if not volumes: continue for volume in volumes: volume_group_relation = { 'storage_id': storage_id, 'native_volume_group_id': volume_group, 'native_volume_id': volume } volume_group_relation_list.append(volume_group_relation) result = { 'volume_groups': volume_group_list, 'vol_grp_vol_rels': volume_group_relation_list } return result except Exception: LOG.error("Failed to get volume group details from VMAX") raise def list_masking_views(self, storage_id): try: # Get list 
of masking_views masking_views = self.rest.get_masking_view_list( self.array_id[storage_id], self.uni_version) masking_view_list = [] for masking_view in masking_views: mv_info = self.rest.get_masking_view( self.array_id[storage_id], self.uni_version, masking_view) masking_view_item = { 'name': masking_view, 'storage_id': storage_id, 'native_masking_view_id': mv_info['maskingViewId'], 'native_storage_host_id': mv_info.get('hostId'), 'native_storage_host_group_id': mv_info.get( 'hostGroupId'), 'native_volume_group_id': mv_info.get('storageGroupId'), 'native_port_group_id': mv_info.get('portGroupId'), } masking_view_list.append(masking_view_item) return masking_view_list except Exception: LOG.error("Failed to get masking views details from VMAX") raise def list_alerts(self, storage_id, query_para): """Get all alerts from an array.""" return self.rest.get_alerts(query_para, version=self.uni_version, array=self.array_id[storage_id]) def clear_alert(self, storage_id, sequence_number): """Clear alert for given sequence number.""" return self.rest.clear_alert(sequence_number, version=self.uni_version, array=self.array_id[storage_id]) def get_storage_metrics(self, storage_id, metrics, start_time, end_time): """Get performance metrics.""" try: perf_list = self.rest.get_storage_metrics( self.array_id[storage_id], metrics, start_time, end_time) return perf_utils.construct_metrics(storage_id, consts.STORAGE_METRICS, consts.STORAGE_CAP, perf_list) except Exception: LOG.error("Failed to get STORAGE metrics for VMAX") raise def get_pool_metrics(self, storage_id, metrics, start_time, end_time): """Get performance metrics.""" try: perf_list = self.rest.get_pool_metrics( self.array_id[storage_id], metrics, start_time, end_time) metrics_array = perf_utils.construct_metrics( storage_id, consts.POOL_METRICS, consts.POOL_CAP, perf_list) return metrics_array except Exception: LOG.error("Failed to get STORAGE POOL metrics for VMAX") raise def get_port_metrics(self, storage_id, metrics, start_time, end_time): """Get performance metrics.""" try: be_perf_list, fe_perf_list, rdf_perf_list = \ self.rest.get_port_metrics(self.array_id[storage_id], metrics, start_time, end_time) metrics_array = [] metrics_list = perf_utils.construct_metrics( storage_id, consts.BEPORT_METRICS, consts.PORT_CAP, be_perf_list) metrics_array.extend(metrics_list) metrics_list = perf_utils.construct_metrics( storage_id, consts.FEPORT_METRICS, consts.PORT_CAP, fe_perf_list) metrics_array.extend(metrics_list) metrics_list = perf_utils.construct_metrics( storage_id, consts.RDFPORT_METRICS, consts.PORT_CAP, rdf_perf_list) metrics_array.extend(metrics_list) return metrics_array except Exception: LOG.error("Failed to get PORT metrics for VMAX") raise def get_controller_metrics(self, storage_id, metrics, start_time, end_time): """Get performance metrics.""" try: be_perf_list, fe_perf_list, rdf_perf_list = self.rest.\ get_controller_metrics(self.array_id[storage_id], metrics, start_time, end_time) metrics_array = [] metrics_list = perf_utils.construct_metrics( storage_id, consts.BEDIRECTOR_METRICS, consts.CONTROLLER_CAP, be_perf_list) metrics_array.extend(metrics_list) metrics_list = perf_utils.construct_metrics( storage_id, consts.FEDIRECTOR_METRICS, consts.CONTROLLER_CAP, fe_perf_list) metrics_array.extend(metrics_list) metrics_list = perf_utils.construct_metrics( storage_id, consts.RDFDIRECTOR_METRICS, consts.CONTROLLER_CAP, rdf_perf_list) metrics_array.extend(metrics_list) return metrics_array except Exception: LOG.error("Failed to get CONTROLLER 
metrics for VMAX") raise def get_disk_metrics(self, storage_id, metrics, start_time, end_time): """Get disk performance metrics.""" if int(self.uni_version) < 91: return [] try: perf_list = self.rest.get_disk_metrics( self.array_id[storage_id], metrics, start_time, end_time) metrics_array = perf_utils.construct_metrics( storage_id, consts.DISK_METRICS, consts.DISK_CAP, perf_list) return metrics_array except Exception: LOG.error("Failed to get DISK metrics for VMAX") raise ================================================ FILE: delfin/drivers/dell_emc/vmax/constants.py ================================================ # Copyright 2020 The SODA Authors. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # minimum interval supported by VMAX VMAX_PERF_MIN_INTERVAL = 5 BEDIRECTOR_METRICS = { 'iops': 'IOs', 'throughput': 'MBs', 'readThroughput': 'MBRead', 'writeThroughput': 'MBWritten', } FEDIRECTOR_METRICS = { 'iops': 'HostIOs', 'throughput': 'HostMBs', } RDFDIRECTOR_METRICS = { 'iops': 'IOs', 'throughput': 'MBSentAndReceived', 'readThroughput': 'MBRead', 'writeThroughput': 'MBWritten', 'responseTime': 'AverageIOServiceTime', } BEPORT_METRICS = { 'iops': 'IOs', 'throughput': 'MBs', 'readThroughput': 'MBRead', 'writeThroughput': 'MBWritten', } FEPORT_METRICS = { 'iops': 'IOs', 'throughput': 'MBs', 'readThroughput': 'MBRead', 'writeThroughput': 'MBWritten', 'responseTime': 'ResponseTime', } RDFPORT_METRICS = { 'iops': 'IOs', 'throughput': 'MBs', 'readThroughput': 'MBRead', 'writeThroughput': 'MBWritten', } DISK_METRICS = { 'iops': 'IOs', 'throughput': 'MBs', 'readThroughput': 'MBReads', 'writeThroughput': 'MBWritten', 'responseTime': 'AvgResponseTime', } POOL_METRICS = { 'iops': 'HostIOs', 'readIops': 'HostReads', 'writeIops': 'HostWrites', 'throughput': 'HostMBs', 'readThroughput': 'HostMBReads', 'writeThroughput': 'HostMBWritten', 'responseTime': 'ResponseTime', } STORAGE_METRICS = { 'iops': 'HostIOs', 'readIops': 'HostReads', 'writeIops': 'HostWrites', 'throughput': 'HostMBs', 'readThroughput': 'HostMBReads', 'writeThroughput': 'HostMBWritten', } IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Input/output operations per second" } READ_IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Read input/output operations per second" } WRITE_IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Write input/output operations per second" } THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data is " "successfully transferred in MB/s" } READ_THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data read is " "successfully transferred in MB/s" } WRITE_THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data write is " "successfully transferred in MB/s" } RESPONSE_TIME_DESCRIPTION = { "unit": "ms", "description": "Average time taken for an IO " "operation in ms" } IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of IO requests in KB" } READ_IO_SIZE_DESCRIPTION = { "unit": "KB", "description": 
"The average size of read IO requests in KB" } WRITE_IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of write IO requests in KB" } STORAGE_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, } POOL_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, } CONTROLLER_CAP = { "iops": IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, } PORT_CAP = { "iops": IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, } DISK_CAP = { "iops": IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, } ================================================ FILE: delfin/drivers/dell_emc/vmax/perf_utils.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from delfin.common import constants


def parse_performance_data(metrics):
    """Parse a metrics response into a map.

    :param metrics: metrics from the Unisphere REST API
    :returns: map with key as metric name and value as dictionary
        containing {timestamp: value} for the timestamps available
    """
    metrics_map = {}
    timestamp = metrics["timestamp"]
    for key, value in metrics.items():
        metrics_map[key] = metrics_map.get(key, {})
        metrics_map[key][timestamp] = value
    return metrics_map


def construct_metrics(storage_id, resource_metrics, unit_map, perf_list):
    metrics_list = []
    metrics_values = {}
    for perf in perf_list:
        collected_metrics_list = perf.get('metrics')
        for collected_metrics in collected_metrics_list:
            metrics_map = parse_performance_data(collected_metrics)
            for key, value in resource_metrics.items():
                metrics_map_value = metrics_map.get(value)
                if metrics_map_value:
                    metrics_values[key] = metrics_values.get(key, {})
                    for k, v in metrics_map_value.items():
                        metrics_values[key][k] = v

        for resource_key, resource_value in metrics_values.items():
            labels = {
                'storage_id': storage_id,
                'resource_type': perf.get('resource_type'),
                'resource_id': perf.get('resource_id'),
                'resource_name': perf.get('resource_name'),
                'type': 'RAW',
                'unit': unit_map[resource_key]['unit']
            }
            metrics_res = constants.metric_struct(name=resource_key,
                                                  labels=labels,
                                                  values=resource_value)
            metrics_list.append(metrics_res)
    return metrics_list


================================================
FILE: delfin/drivers/dell_emc/vmax/rest.py
================================================
# Copyright 2020 The SODA Authors.
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json
import sys

import requests
import requests.auth
import requests.exceptions as r_exc
import six
import urllib3
from oslo_log import log as logging

from delfin import cryptor
from delfin import exception
from delfin import ssl_utils
from delfin.common import alert_util
from delfin.common import constants as delfin_const
from delfin.drivers.dell_emc.vmax import constants
from delfin.i18n import _

LOG = logging.getLogger(__name__)

PERFORMANCE = 'performance'
SLOPROVISIONING = 'sloprovisioning'
SYSTEM = 'system'
SYMMETRIX = 'symmetrix'
DIRECTOR = 'director'
PORT = 'port'
U4V_VERSION = '92'
UCODE_5978 = '5978'

# HTTP constants
GET = 'GET'
POST = 'POST'
PUT = 'PUT'
DELETE = 'DELETE'
STATUS_200 = 200
STATUS_201 = 201
STATUS_202 = 202
STATUS_204 = 204
STATUS_401 = 401

# Default expiration time(in sec) for vmax connect request
VERSION_GET_TIME_OUT = 10


class VMaxRest(object):
    """Rest class based on Unisphere for VMax Rest API."""

    def __init__(self):
        self.session = None
        self.base_uri = None
        self.user = None
        self.passwd = None
        self.verify = None
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    def set_rest_credentials(self, array_info):
        """Given the array record set the rest server credentials.
:param array_info: record """ ip = array_info['host'] port = array_info['port'] self.user = array_info['username'] self.passwd = array_info['password'] ip_port = "%(ip)s:%(port)d" % {'ip': ip, 'port': port} self.base_uri = ("https://%(ip_port)s/univmax/restapi" % { 'ip_port': ip_port}) def establish_rest_session(self): """Establish the rest session. :returns: requests.session() -- session, the rest session """ LOG.info("Establishing REST session with %(base_uri)s", {'base_uri': self.base_uri}) if self.session: self.session.close() session = requests.session() session.headers = {'content-type': 'application/json', 'accept': 'application/json', 'Application-Type': 'delfin'} session.auth = requests.auth.HTTPBasicAuth( self.user, cryptor.decode(self.passwd)) if not self.verify: session.verify = False else: LOG.debug("Enable certificate verification, ca_path: {0}".format( self.verify)) session.verify = self.verify session.mount("https://", ssl_utils.get_host_name_ignore_adapter()) self.session = session return session def request(self, target_uri, method, params=None, request_object=None, timeout=None): """Sends a request (GET, POST, PUT, DELETE) to the target api. :param target_uri: target uri (string) :param method: The method (GET, POST, PUT, or DELETE) :param params: Additional URL parameters :param request_object: request payload (dict) :param timeout: expiration timeout(in sec) :returns: server response object (dict) :raises: StorageBackendException, Timeout, ConnectionError, HTTPError, SSLError """ url, message, status_code, response = None, None, None, None if not self.session: self.establish_rest_session() try: url = ("%(self.base_uri)s%(target_uri)s" % { 'self.base_uri': self.base_uri, 'target_uri': target_uri}) if request_object: response = self.session.request( method=method, url=url, data=json.dumps(request_object, sort_keys=True, indent=4), timeout=timeout) elif params: response = self.session.request( method=method, url=url, params=params, timeout=timeout) else: response = self.session.request( method=method, url=url, timeout=timeout) status_code = response.status_code try: message = response.json() except ValueError: LOG.debug("No response received from API. Status code " "received is: %(status_code)s", { 'status_code': status_code}) message = None LOG.debug("%(method)s request to %(url)s has returned with " "a status code of: %(status_code)s.", { 'method': method, 'url': url, 'status_code': status_code}) except r_exc.SSLError as e: msg = _("The connection to %(base_uri)s has encountered an " "SSL error. Please check your SSL config or supplied " "SSL cert in Delfin configuration. SSL Exception " "message: %(e)s") % {'base_uri': self.base_uri, 'e': e} LOG.error(msg) err_str = six.text_type(e) if 'certificate verify failed' in err_str: raise exception.SSLCertificateFailed() else: raise exception.SSLHandshakeFailed() except (r_exc.Timeout, r_exc.ConnectionError, r_exc.HTTPError) as e: exc_class, __, __ = sys.exc_info() msg = _("The %(method)s to Unisphere server %(base)s has " "experienced a %(error)s error. Please check your " "Unisphere server connection/availability. 
" "Exception message: %(exc_msg)s") raise exc_class(msg % {'method': method, 'base': self.base_uri, 'error': e.__class__.__name__, 'exc_msg': e}) except Exception as e: msg = _("The %(method)s request to URL %(url)s failed with " "exception %(e)s") LOG.error(msg, {'method': method, 'url': url, 'e': six.text_type(e)}) raise exception.StorageBackendException( message=(msg, {'method': method, 'url': url, 'e': six.text_type(e)})) return status_code, message @staticmethod def check_status_code_success(operation, status_code, message): """Check if a status code indicates success. :param operation: the operation :param status_code: the status code :param message: the server response :raises: StorageBackendException """ if status_code not in [STATUS_200, STATUS_201, STATUS_202, STATUS_204]: exception_message = ( _("Error %(operation)s. The status code received is %(sc)s " "and the message is %(message)s.") % { 'operation': operation, 'sc': str(status_code), 'message': message}) raise exception.StorageBackendException( message=exception_message) def build_uri(self, *args, **kwargs): """Build the target url. :param args: input args, see _build_uri_legacy_args() for input breakdown :param kwargs: input keyword args, see _build_uri_kwargs() for input breakdown :return: target uri -- str """ if args: target_uri = self._build_uri_legacy_args(*args, **kwargs) else: target_uri = self._build_uri_kwargs(**kwargs) return target_uri @staticmethod def _build_uri_legacy_args(*args, **kwargs): """Build the target URI using legacy args & kwargs. Expected format: arg[0]: the array serial number: the array serial number -- str arg[1]: the resource category e.g. 'sloprovisioning' -- str arg[2]: the resource type e.g. 'maskingview' -- str kwarg resource_name: the name of a specific resource -- str kwarg private: if endpoint is private -- bool kwarg version: U4V REST endpoint version -- int/str kwarg no_version: if endpoint should be versionless -- bool :param args: input args -- see above :param kwargs: input keyword args -- see above :return: target URI -- str """ # Extract args following legacy _build_uri() format array_id, category, resource_type = args[0], args[1], args[2] # Extract keyword args following legacy _build_uri() format resource_name = kwargs.get('resource_name') private = kwargs.get('private') version = kwargs.get('version', U4V_VERSION) if kwargs.get('no_version'): version = None # Build URI target_uri = '' if private: target_uri += '/private' if version: target_uri += '/%(version)s' % {'version': version} target_uri += ( '/{cat}/symmetrix/{array_id}/{res_type}'.format( cat=category, array_id=array_id, res_type=resource_type)) if resource_name: target_uri += '/{resource_name}'.format( resource_name=kwargs.get('resource_name')) return target_uri @staticmethod def _build_uri_kwargs(**kwargs): """Build the target URI using kwargs. Expected kwargs: private: if endpoint is private (optional) -- bool version: U4P REST endpoint version (optional) -- int/None no_version: if endpoint should be versionless (optional) -- bool category: U4P REST category eg. 'common', 'replication'-- str resource_level: U4P REST resource level eg. 'symmetrix' (optional) -- str resource_level_id: U4P REST resource level id (optional) -- str resource_type: U4P REST resource type eg. 'rdf_director', 'host' (optional) -- str resource_type_id: U4P REST resource type id (optional) -- str resource: U4P REST resource eg. 'port' (optional) -- str resource_id: U4P REST resource id (optional) -- str object_type: U4P REST resource eg. 
'rdf_group' (optional) -- str object_type_id: U4P REST resource id (optional) -- str :param kwargs: input keyword args -- see above :return: target URI -- str """ version = kwargs.get('version', U4V_VERSION) if kwargs.get('no_version'): version = None target_uri = '' if kwargs.get('private'): target_uri += '/private' if version: target_uri += '/%(ver)s' % {'ver': version} target_uri += '/%(cat)s' % {'cat': kwargs.get('category')} if kwargs.get('resource_level'): target_uri += '/%(res_level)s' % { 'res_level': kwargs.get('resource_level')} if kwargs.get('resource_level_id'): target_uri += '/%(res_level_id)s' % { 'res_level_id': kwargs.get('resource_level_id')} if kwargs.get('resource_type'): target_uri += '/%(res_type)s' % { 'res_type': kwargs.get('resource_type')} if kwargs.get('resource_type_id'): target_uri += '/%(res_type_id)s' % { 'res_type_id': kwargs.get('resource_type_id')} if kwargs.get('resource'): target_uri += '/%(res)s' % { 'res': kwargs.get('resource')} if kwargs.get('resource_id'): target_uri += '/%(res_id)s' % { 'res_id': kwargs.get('resource_id')} if kwargs.get('object_type'): target_uri += '/%(object_type)s' % { 'object_type': kwargs.get('object_type')} if kwargs.get('object_type_id'): target_uri += '/%(object_type_id)s' % { 'object_type_id': kwargs.get('object_type_id')} return target_uri def get_request(self, target_uri, resource_type, params=None): """Send a GET request to the array. :param target_uri: the target uri :param resource_type: the resource type, e.g. maskingview :param params: optional dict of filter params :returns: resource_object -- dict or None """ resource_object = None sc, message = self.request(target_uri, GET, params=params) operation = 'get %(res)s' % {'res': resource_type} try: self.check_status_code_success(operation, sc, message) except Exception as e: LOG.debug("Get resource failed with %(e)s", {'e': e}) if sc == STATUS_200: resource_object = message resource_object = self.list_pagination(resource_object) return resource_object def get_alert_request(self, target_uri): """Send a GET request to the array. :param target_uri: the target uri :returns: resource_object -- dict or None """ sc, message = self.request(target_uri, GET, params=None) if sc != STATUS_200: raise exception.StorageListAlertFailed(message) resource_object = message resource_object = self.list_pagination(resource_object) return resource_object def get_resource(self, array, category, resource_type, resource_name=None, params=None, private=False, version=U4V_VERSION): """Get resource details from array. :param array: the array serial number :param category: the resource category e.g. sloprovisioning :param resource_type: the resource type e.g. maskingview :param resource_name: the name of a specific resource :param params: query parameters :param private: empty string or '/private' if private url :param version: None or specific version number if required :returns: resource object -- dict or None """ target_uri = self.build_uri( array, category, resource_type, resource_name=resource_name, private=private, version=version) return self.get_request(target_uri, resource_type, params) def get_resource_kwargs(self, *args, **kwargs): """Get resource details from the array. :key version: Unisphere version -- int :key no_version: if versionless uri -- bool :key category: the resource category e.g. sloprovisioning :key resource_level: resource level e.g. storagegroup :key resource_level_id: resource level id :key resource_type: optional resource type e.g. 
maskingview
        :key resource_type_id: optional resource type id
        :key resource: the name of a specific resource
        :key resource_id: the name of a specific resource
        :key object_type: optional name of resource
        :key object_type_id: optional name of resource
        :key params: query parameters -- dict
        :key private: empty string or '/private' if private url
        :returns: resource object -- dict or None
        """
        resource_type = None
        if args:
            resource_type = args[2]
        elif kwargs:
            resource_type = kwargs.get('resource_level')
        target_uri = self.build_uri(*args, **kwargs)
        return self.get_request(
            target_uri, resource_type, kwargs.get('params'))

    def get_array_detail(self, version=U4V_VERSION, array=''):
        """Get an array from its serial number.

        :param array: the array serial number
        :param version: the unisphere version
        :returns: array_details -- dict or None
        """
        target_uri = '/%s/system/symmetrix/%s' % (version, array)
        array_details = self.get_request(target_uri, 'system')
        if not array_details:
            LOG.error("Cannot connect to array %(array)s.",
                      {'array': array})
        return array_details

    def get_uni_version(self):
        """Get the unisphere version from the server.

        :returns: version and major_version(e.g. ("V8.4.0.16", "84"))
        """
        version, major_version = None, None
        response = self.get_unisphere_version()
        if response and response.get('version'):
            version = response['version']
            version_list = version.split('.')
            major_version = version_list[0][1] + version_list[1]
        return version, major_version

    def get_unisphere_version(self):
        """Get the unisphere version from the server.

        :returns: version dict
        """
        post_90_endpoint = '/version'
        pre_91_endpoint = '/system/version'

        status_code, version_dict = self.request(
            post_90_endpoint, GET, timeout=VERSION_GET_TIME_OUT)
        if status_code != STATUS_200:
            status_code, version_dict = self.request(
                pre_91_endpoint, GET, timeout=VERSION_GET_TIME_OUT)
        if status_code == STATUS_401:
            raise exception.InvalidUsernameOrPassword()

        if not version_dict:
            LOG.error("Unisphere version info not found.")
        return version_dict

    def get_srp_by_name(self, array, version, srp=None):
        """Return the details of a storage pool.

        :param array: the array serial number
        :param version: the unisphere version
        :param srp: the storage resource pool name
        :returns: SRP_details -- dict or None
        """
        LOG.debug("storagePoolName: %(srp)s, array: %(array)s.",
                  {'srp': srp, 'array': array})
        srp_details = self.get_resource(array, SLOPROVISIONING, 'srp',
                                        resource_name=srp, version=version,
                                        params=None)
        return srp_details

    def get_vmax_array_details(self, version=U4V_VERSION, array=''):
        """Get the VMax array properties.

        :param version: the unisphere version
        :param array: the array serial number
        :returns: the VMax array details -- dict
        """
        system_info = self.get_array_detail(version, array)
        vmax_model = system_info.get('model', 'VMAX')
        vmax_ucode = system_info.get('ucode')
        vmax_display_name = system_info.get('display_name', vmax_model)
        array_details = {"model": vmax_model,
                         "ucode": vmax_ucode,
                         "display_name": vmax_display_name}
        return array_details

    def get_array_model_info(self, version=U4V_VERSION, array=''):
        """Get the VMax model.
:param version: the unisphere version :param array: the array serial number :returns: the VMax model """ is_next_gen = False system_info = self.get_array_detail(version, array) array_model = system_info.get('model', None) ucode_version = system_info['ucode'].split('.')[0] if ucode_version >= UCODE_5978: is_next_gen = True return array_model, is_next_gen def get_storage_group(self, array, version, storage_group_name): """Given a name, return storage group details. :param version: the unisphere version :param array: the array serial number :param storage_group_name: the name of the storage group :returns: storage group dict or None """ return self.get_resource( array, SLOPROVISIONING, 'storagegroup', version=version, resource_name=storage_group_name) def get_system_capacity(self, array, version): target_uri = '/%s/sloprovisioning/symmetrix/%s' % (version, array) capacity_details = self.get_request(target_uri, None) if not capacity_details: LOG.error("Cannot connect to array %(array)s.", {'array': array}) return capacity_details def get_default_srps(self, array, version=U4V_VERSION): """Get the VMax array default SRPs. :param version: the unisphere version :param array: the array serial number :returns: dictionary default SRPs """ symmetrix_info = self.get_system_capacity(array, version) default_fba_srp = symmetrix_info.get('default_fba_srp', None) default_ckd_srp = symmetrix_info.get('default_ckd_srp', None) default_srps = {"FBA": default_fba_srp, "CKD": default_ckd_srp} return default_srps def get_volume(self, array, version, device_id): """Get a VMax volume from array. :param array: the array serial number :param device_id: the volume device id :returns: volume dict :raises: StorageBackendException """ volume_dict = self.get_resource( array, SLOPROVISIONING, 'volume', resource_name=device_id, version=version) if not volume_dict: exception_message = (_("Volume %(deviceID)s not found.") % {'deviceID': device_id}) LOG.error(exception_message) raise exception.StorageBackendException( message=exception_message) return volume_dict def get_volume_list(self, array, version, params): """Get a filtered list of VMax volumes from array. Filter parameters are required as the unfiltered volume list could be very large and could affect performance if called often. :param array: the array serial number :param version: the unisphere version :param params: filter parameters :returns: device_ids -- list """ device_ids = [] volume_dict_list = self.get_resource( array, SLOPROVISIONING, 'volume', version=version, params=params) try: for vol_dict in volume_dict_list: device_id = vol_dict['volumeId'] device_ids.append(device_id) except (KeyError, TypeError): pass return device_ids def get_director(self, array, version, device_id): """Get a VMAX director from array. 
        :param array: the array serial number
        :param version: the unisphere version
        :param device_id: the director id
        :returns: director dict
        :raises: ControllerNotFound
        """
        director_dict = None
        # Unisphere versions 90 and above
        if int(version) > 84:
            director_dict = self.get_resource(
                array, SYSTEM, 'director', resource_name=device_id,
                version=version)
        # Unisphere versions 84
        if int(version) == 84:
            director_dict = self.get_resource(
                array, SLOPROVISIONING, 'director', resource_name=device_id,
                version=version)
        if int(version) < 84:
            LOG.error("Director is not supported in Unisphere version < 8.4")
            return None

        if not director_dict:
            exception_message = (_("Director %(deviceID)s not found.")
                                 % {'deviceID': device_id})
            LOG.error(exception_message)
            raise exception.ControllerNotFound(device_id)
        return director_dict

    def get_director_list(self, array, version, params=None):
        """Get a filtered list of VMAX controllers from array.

        :param array: the array serial number
        :param version: the unisphere version
        :param params: filter parameters
        :returns: directors -- list
        """
        response = None
        # Unisphere versions 90 and above
        if int(version) > 84:
            response = self.get_resource(
                array, SYSTEM, 'director', version=version, params=params)
        # Unisphere versions 84
        if int(version) == 84:
            response = self.get_resource(
                array, SLOPROVISIONING, 'director', version=version,
                params=params)
        if int(version) < 84:
            LOG.error("Director not supported in Unisphere version < 8.4")
            return []

        if not response:
            exception_message = _("Get Director list failed.")
            LOG.error(exception_message)
            raise exception.ControllerListNotFound(array)
        return response.get('directorId', list()) if response else list()

    def get_port(self, array, version, director_id, port_id):
        """Get a VMAX port from array.

        :param array: the array serial number
        :param version: the unisphere version -- int
        :param director_id: the director id
        :param port_id: the port id
        :returns: port dict
        :raises: PortNotFound
        """
        port_dict = None
        # Unisphere versions 90 and above
        if int(version) > 84:
            port_dict = self.get_resource_kwargs(
                category=SYSTEM, version=version,
                resource_level=SYMMETRIX, resource_level_id=array,
                resource_type=DIRECTOR, resource_type_id=director_id,
                resource=PORT, resource_id=port_id)
        # Unisphere versions 84
        if int(version) == 84:
            port_dict = self.get_resource_kwargs(
                category=SLOPROVISIONING, version=version,
                resource_level=SYMMETRIX, resource_level_id=array,
                resource_type=DIRECTOR, resource_type_id=director_id,
                resource=PORT, resource_id=port_id)
        if int(version) < 84:
            LOG.error("Port get is not supported in Unisphere version < 8.4")
            return None

        if not port_dict:
            exception_message = (_("Port %(deviceID)s not found.")
                                 % {'deviceID': port_id})
            LOG.error(exception_message)
            raise exception.PortNotFound(port_id)
        return port_dict

    def get_port_list(self, array, version, director_id, params=None):
        """Get a filtered list of VMAX ports from array.

        :param array: the array serial number
        :param version: the unisphere version -- int
        :param params: filter parameters
        :param director_id: director id
        :returns: device_ids -- list
        """
        response = None
        # Unisphere versions 90 and above
        if int(version) > 84:
            response = self.get_resource_kwargs(
                category=SYSTEM, version=version,
                resource_level=SYMMETRIX, resource_level_id=array,
                resource_type=DIRECTOR, resource_type_id=director_id,
                resource=PORT, params=params)
        # Unisphere versions 84
        if int(version) == 84:
            response = self.get_resource_kwargs(
                category=SLOPROVISIONING, version=version,
                resource_level=SYMMETRIX, resource_level_id=array,
                resource_type=DIRECTOR, resource_type_id=director_id,
                resource=PORT, params=params)
        if int(version) < 84:
            LOG.error("Port list not supported in Unisphere version < 8.4")
            return []

        if not response:
            exception_message = _("Get Port list failed.")
            LOG.error(exception_message)
            raise exception.PortListNotFound(array)
        port_ids = response.get('symmetrixPortKey',
                                list()) if response else list()
        return port_ids

    def get_disk(self, array, version, device_id):
        """Get a VMax disk from array.

        :param array: the array serial number
        :param version: the unisphere version -- int
        :param device_id: the disk device id
        :returns: disk dict
        :raises: DiskNotFound
        """
        disk_dict = self.get_resource(
            array, SYSTEM, 'disk', resource_name=device_id, version=version)
        if not disk_dict:
            exception_message = (_("Disk %(deviceID)s not found.")
                                 % {'deviceID': device_id})
            LOG.error(exception_message)
            raise exception.DiskNotFound(device_id)
        return disk_dict

    def get_disk_list(self, array, version, params=None):
        """Get a filtered list of VMax disks from array.

        Filter parameters are required as the unfiltered disk list could be
        very large and could affect performance if called often.

        :param array: the array serial number
        :param version: the unisphere version
        :param params: filter parameters
        :returns: disk_ids -- list
        """
        disk_dict_list = self.get_resource(
            array, SYSTEM, 'disk', version=version, params=params)
        return disk_dict_list.get('disk_ids', [])

    def get_initiator(self, array, version, initiator_id):
        """Get a VMax initiator from array.

        :param array: the array serial number
        :param version: the unisphere version -- int
        :param initiator_id: the initiator id
        :returns: initiator dict
        :raises: StorageHostInitiatorNotFound
        """
        initiator_dict = self.get_resource(
            array, SLOPROVISIONING, 'initiator',
            resource_name=initiator_id, version=version)
        if not initiator_dict:
            exception_message = (_("Initiator %(initiator_id)s not found.")
                                 % {'initiator_id': initiator_id})
            LOG.error(exception_message)
            raise exception.StorageHostInitiatorNotFound(initiator_id)
        return initiator_dict

    def get_initiator_list(self, array, version, params=None):
        """Get a filtered list of VMax initiators from array.

        Filter parameters are required as the unfiltered initiator list
        could be very large and could affect performance if called often.

        :param array: the array serial number
        :param version: the unisphere version
        :param params: filter parameters
        :returns: initiatorId -- list
        """
        initiator_dict_list = self.get_resource(
            array, SLOPROVISIONING, 'initiator',
            version=version, params=params)
        return initiator_dict_list.get('initiatorId', [])

    def get_host(self, array, version, host_id):
        """Get a VMax host from array.

        :param array: the array serial number
        :param version: the unisphere version -- int
        :param host_id: the host id
        :returns: host dict
        :raises: StorageHostNotFound
        """
        host_dict = self.get_resource(
            array, SLOPROVISIONING, 'host',
            resource_name=host_id, version=version)
        if not host_dict:
            exception_message = (_("Host %(host_id)s not found.")
                                 % {'host_id': host_id})
            LOG.error(exception_message)
            raise exception.StorageHostNotFound(host_id)
        return host_dict

    def get_host_list(self, array, version, params=None):
        """Get a filtered list of VMax hosts from array.

        Filter parameters are required as the unfiltered host list could be
        very large and could affect performance if called often.

        :param array: the array serial number
        :param version: the unisphere version
        :param params: filter parameters
        :returns: hostId -- list
        """
        host_dict_list = self.get_resource(
            array, SLOPROVISIONING, 'host', version=version, params=params)
        return host_dict_list.get('hostId', [])

    def get_host_group(self, array, version, host_group_id):
        """Get a VMax host group from array.

        :param array: the array serial number
        :param version: the unisphere version -- int
        :param host_group_id: the host group id
        :returns: host group dict
        :raises: StorageHostGroupNotFound
        """
        host_group_dict = self.get_resource(
            array, SLOPROVISIONING, 'hostgroup',
            resource_name=host_group_id, version=version)
        if not host_group_dict:
            exception_message = (_("HostGroup %(host_group_id)s not found.")
                                 % {'host_group_id': host_group_id})
            LOG.error(exception_message)
            raise exception.StorageHostGroupNotFound(host_group_id)
        return host_group_dict

    def get_host_group_list(self, array, version, params=None):
        """Get a filtered list of VMax host groups from array.

        Filter parameters are required as the unfiltered host group list
        could be very large and could affect performance if called often.

        :param array: the array serial number
        :param version: the unisphere version
        :param params: filter parameters
        :returns: hostGroupId -- list
        """
        host_group_dict_list = self.get_resource(
            array, SLOPROVISIONING, 'hostgroup',
            version=version, params=params)
        return host_group_dict_list.get('hostGroupId', [])

    def get_port_group(self, array, version, port_group_id):
        """Get a VMax port group from array.

        :param array: the array serial number
        :param version: the unisphere version -- int
        :param port_group_id: the port group id
        :returns: port group dict
        :raises: PortGroupNotFound
        """
        port_group_dict = self.get_resource(
            array, SLOPROVISIONING, 'portgroup',
            resource_name=port_group_id, version=version)
        if not port_group_dict:
            exception_message = (_("PortGroup %(port_group_id)s not found.")
                                 % {'port_group_id': port_group_id})
            LOG.error(exception_message)
            raise exception.PortGroupNotFound(port_group_id)
        return port_group_dict

    def get_port_group_list(self, array, version, params=None):
        """Get a filtered list of VMax port groups from array.

        Filter parameters are required as the unfiltered port group list
        could be very large and could affect performance if called often.

        :param array: the array serial number
        :param version: the unisphere version
        :param params: filter parameters
        :returns: portGroupId -- list
        """
        port_group_dict_list = self.get_resource(
            array, SLOPROVISIONING, 'portgroup',
            version=version, params=params)
        return port_group_dict_list.get('portGroupId', [])

    def get_volume_group(self, array, version, storage_group_id):
        """Get a VMax storage/volume group from array.

        :param array: the array serial number
        :param version: the unisphere version -- int
        :param storage_group_id: the storage group id
        :returns: volume group dict
        :raises: VolumeGroupNotFound
        """
        storage_group_dict = self.get_resource(
            array, SLOPROVISIONING, 'storagegroup',
            resource_name=storage_group_id, version=version)
        if not storage_group_dict:
            exception_message = (_("StorageGroup %(id)s not found.")
                                 % {'id': storage_group_id})
            LOG.error(exception_message)
            raise exception.VolumeGroupNotFound(storage_group_id)
        return storage_group_dict

    def get_volume_group_list(self, array, version, params=None):
        """Get a filtered list of VMax storage groups from array.

        Filter parameters are required as the unfiltered storage group list
        could be very large and could affect performance if called often.

        :param array: the array serial number
        :param version: the unisphere version
        :param params: filter parameters
        :returns: storageGroupId -- list
        """
        storage_group_dict_list = self.get_resource(
            array, SLOPROVISIONING, 'storagegroup',
            version=version, params=params)
        return storage_group_dict_list.get('storageGroupId', [])

    def get_masking_view(self, array, version, masking_view_id):
        """Get a VMax masking view from array.

        :param array: the array serial number
        :param version: the unisphere version -- int
        :param masking_view_id: the masking view id
        :returns: masking view dict
        :raises: MaskingViewNotFound
        """
        masking_view_dict = self.get_resource(
            array, SLOPROVISIONING, 'maskingview',
            resource_name=masking_view_id, version=version)
        if not masking_view_dict:
            exception_message = (_("Masking View %(id)s not found.")
                                 % {'id': masking_view_id})
            LOG.error(exception_message)
            raise exception.MaskingViewNotFound(masking_view_id)
        return masking_view_dict

    def get_masking_view_list(self, array, version, params=None):
        """Get a filtered list of VMax masking views from array.

        Filter parameters are required as the unfiltered masking view list
        could be very large and could affect performance if called often.

        :param array: the array serial number
        :param version: the unisphere version
        :param params: filter parameters
        :returns: maskingViewId -- list
        """
        masking_view_dict_list = self.get_resource(
            array, SLOPROVISIONING, 'maskingview',
            version=version, params=params)
        return masking_view_dict_list.get('maskingViewId', [])

    def post_request(self, target_uri, payload):
        """Generate a POST request.
:param target_uri: the uri to query from unipshere REST API :param payload: the payload :returns: status_code -- int, message -- string, server response """ status_code, message = self.request(target_uri, POST, request_object=payload) resource_object = None if status_code == STATUS_200: resource_object = message resource_object = self.list_pagination(resource_object) operation = 'POST request for URL' self.check_status_code_success( operation, status_code, resource_object) return status_code, resource_object def get_array_keys(self, array): target_uri = '/performance/Array/keys' response = self.get_request(target_uri, PERFORMANCE, None) if response is None: err_msg = "Failed to get Array keys from VMAX: {0}"\ .format(str(array)) LOG.error(err_msg) return response def get_resource_keys(self, array, resource, payload=None): if payload is None: payload = {} payload['symmetrixId'] = str(array) target_uri = '/performance/{0}/keys'.format(resource) sc, response = self.post_request(target_uri, payload) if response is None: err_msg = "Failed to get {0} keys from VMAX: {1} status: {2}"\ .format(resource, str(array), sc) LOG.error(err_msg) return response def get_resource_metrics(self, array, start_time, end_time, resource, metrics, payload=None): if payload is None: payload = {} payload['symmetrixId'] = str(array) payload['startDate'] = start_time payload['endDate'] = end_time payload['metrics'] = metrics payload['dataFormat'] = 'Average' target_uri = '/performance/{0}/metrics'.format(resource) status_code, response = self.post_request(target_uri, payload) if status_code != STATUS_200: err_msg = "Failed to get {0} metrics from VMAX: {1}" \ .format(resource, str(array)) LOG.error(err_msg) return None return response def get_storage_metrics(self, array, metrics, start_time, end_time): """Get a array performance metrics from VMAX unipshere REST API. :param array: the array serial number :param metrics: required metrics :param start_time: start time for collection :param end_time: end time for collection :returns: message -- response from unipshere REST API """ storage_metrics = [] for k in metrics.keys(): vmax_key = constants.STORAGE_METRICS.get(k) if vmax_key: storage_metrics.append(vmax_key) keys = self.get_array_keys(array) keys_dict = None if keys: keys_dict = keys.get('arrayInfo', None) metrics_list = [] for key_dict in keys_dict: if key_dict.get('symmetrixId') == array: metrics_res = self.get_resource_metrics( array, start_time, end_time, 'Array', storage_metrics, payload=None) if metrics_res: label = { 'resource_id': key_dict.get('symmetrixId'), 'resource_name': 'VMAX' + key_dict.get('symmetrixId'), 'resource_type': delfin_const.ResourceType.STORAGE, 'metrics': metrics_res } metrics_list.append(label) return metrics_list def get_pool_metrics(self, array, metrics, start_time, end_time): """Get a array performance metrics from VMAX unipshere REST API. 
:param array: the array serial number :param metrics: required metrics :param start_time: start time for collection :param end_time: end time for collection :returns: message -- response from unipshere REST API """ pool_metrics = [] for k in metrics.keys(): vmax_key = constants.POOL_METRICS.get(k) if vmax_key: pool_metrics.append(vmax_key) keys = self.get_resource_keys(array, 'SRP') keys_dict = None if keys: keys_dict = keys.get('srpInfo', None) metrics_list = [] for key_dict in keys_dict: payload = {'srpId': key_dict.get('srpId')} metrics_res = self.get_resource_metrics( array, start_time, end_time, 'SRP', pool_metrics, payload=payload) if metrics_res: label = { 'resource_id': key_dict.get('srpId'), 'resource_name': key_dict.get('srpId'), 'resource_type': delfin_const.ResourceType.STORAGE_POOL, 'metrics': metrics_res } metrics_list.append(label) return metrics_list def get_fedirector_metrics(self, array, metrics, start_time, end_time): """Get a array performance metrics from VMAX unipshere REST API. :param array: the array serial number :param metrics: required metrics :param start_time: start time for collection :param end_time: end time for collection :returns: message -- response from unipshere REST API """ fedirector_metrics = [] for k in metrics.keys(): vmax_key = constants.FEDIRECTOR_METRICS.get(k) if vmax_key: fedirector_metrics.append(vmax_key) keys = self.get_resource_keys(array, 'FEDirector') keys_dict = None if keys: keys_dict = keys.get('feDirectorInfo', None) metrics_list = [] for key_dict in keys_dict: payload = {'directorId': key_dict.get('directorId')} metrics_res = self.get_resource_metrics( array, start_time, end_time, 'FEDirector', fedirector_metrics, payload=payload) if metrics_res: label = { 'resource_id': key_dict.get('directorId'), 'resource_name': 'FEDirector_' + key_dict.get('directorId'), 'resource_type': delfin_const.ResourceType.CONTROLLER, 'metrics': metrics_res } metrics_list.append(label) return metrics_list def get_bedirector_metrics(self, array, metrics, start_time, end_time): """Get a array performance metrics from VMAX unipshere REST API. :param array: the array serial number :param metrics: required metrics :param start_time: start time for collection :param end_time: end time for collection :returns: message -- response from unipshere REST API """ bedirector_metrics = [] for k in metrics.keys(): vmax_key = constants.BEDIRECTOR_METRICS.get(k) if vmax_key: bedirector_metrics.append(vmax_key) keys = self.get_resource_keys(array, 'BEDirector') keys_dict = None if keys: keys_dict = keys.get('beDirectorInfo', None) metrics_list = [] for key_dict in keys_dict: payload = {'directorId': key_dict.get('directorId')} metrics_res = self.get_resource_metrics( array, start_time, end_time, 'BEDirector', bedirector_metrics, payload=payload) if metrics_res: label = { 'resource_id': key_dict.get('directorId'), 'resource_name': 'BEDirector_' + key_dict.get('directorId'), 'resource_type': delfin_const.ResourceType.CONTROLLER, 'metrics': metrics_res } metrics_list.append(label) return metrics_list def get_rdfdirector_metrics(self, array, metrics, start_time, end_time): """Get a array performance metrics from VMAX unipshere REST API. 
    def get_rdfdirector_metrics(self, array, metrics, start_time, end_time):
        """Get RDF director metrics from the VMAX Unisphere REST API.

        :param array: the array serial number
        :param metrics: required metrics
        :param start_time: start time for collection
        :param end_time: end time for collection
        :returns: metrics_list -- collected metrics with resource labels
        """
        rdfdirector_metrics = []
        for k in metrics.keys():
            vmax_key = constants.RDFDIRECTOR_METRICS.get(k)
            if vmax_key:
                rdfdirector_metrics.append(vmax_key)
        keys = self.get_resource_keys(array, 'RDFDirector')
        keys_dict = None
        if keys:
            keys_dict = keys.get('rdfDirectorInfo', None)
        metrics_list = []
        for key_dict in (keys_dict or []):
            payload = {'directorId': key_dict.get('directorId')}
            metrics_res = self.get_resource_metrics(
                array, start_time, end_time, 'RDFDirector',
                rdfdirector_metrics, payload=payload)
            if metrics_res:
                label = {
                    'resource_id': key_dict.get('directorId'),
                    'resource_name':
                        'RDFDirector_' + key_dict.get('directorId'),
                    'resource_type': delfin_const.ResourceType.CONTROLLER,
                    'metrics': metrics_res
                }
                metrics_list.append(label)
        return metrics_list

    def get_controller_metrics(self, array, metrics, start_time, end_time):
        """Get controller metrics from the VMAX Unisphere REST API.

        :param array: the array serial number
        :param metrics: required metrics
        :param start_time: start time for collection
        :param end_time: end time for collection
        :returns: tuple -- back-end, front-end and RDF director metrics
        """
        be_metrics = self.get_bedirector_metrics(
            array, metrics, start_time, end_time)
        fe_metrics = self.get_fedirector_metrics(
            array, metrics, start_time, end_time)
        rdf_metrics = self.get_rdfdirector_metrics(
            array, metrics, start_time, end_time)
        return be_metrics, fe_metrics, rdf_metrics
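    # Note: get_controller_metrics (like get_port_metrics further below)
    # returns the three per-director lists as a tuple rather than a single
    # list. A caller that needs one flat list can merge them; a minimal
    # sketch, assuming a connected client instance:
    #
    #   import itertools
    #   be, fe, rdf = client.get_controller_metrics(array, metrics,
    #                                               start_time, end_time)
    #   controller_labels = list(itertools.chain(be, fe, rdf))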
    def get_feport_metrics(self, array, metrics, start_time, end_time):
        """Get front-end port metrics from the VMAX Unisphere REST API.

        :param array: the array serial number
        :param metrics: required metrics
        :param start_time: start time for collection
        :param end_time: end time for collection
        :returns: metrics_list -- collected metrics with resource labels
        """
        feport_metrics = []
        for k in metrics.keys():
            vmax_key = constants.FEPORT_METRICS.get(k)
            if vmax_key:
                feport_metrics.append(vmax_key)
        director_keys = self.get_resource_keys(array, 'FEDirector')
        director_keys_dict = None
        if director_keys:
            director_keys_dict = director_keys.get('feDirectorInfo', None)
        metrics_list = []
        for director_key_dict in (director_keys_dict or []):
            payload = {'directorId': director_key_dict.get('directorId')}
            keys = self.get_resource_keys(array, 'FEPort', payload=payload)
            keys_dict = None
            if keys:
                keys_dict = keys.get('fePortInfo', None)
            for key_dict in (keys_dict or []):
                payload['portId'] = key_dict.get('portId')
                metrics_res = self.get_resource_metrics(
                    array, start_time, end_time, 'FEPort',
                    feport_metrics, payload=payload)
                if metrics_res:
                    label = {
                        'resource_id': key_dict.get('portId'),
                        'resource_name': 'FEPort_'
                            + director_key_dict.get('directorId')
                            + '_' + key_dict.get('portId'),
                        'resource_type': delfin_const.ResourceType.PORT,
                        'metrics': metrics_res
                    }
                    metrics_list.append(label)
        return metrics_list

    def get_beport_metrics(self, array, metrics, start_time, end_time):
        """Get back-end port metrics from the VMAX Unisphere REST API.

        :param array: the array serial number
        :param metrics: required metrics
        :param start_time: start time for collection
        :param end_time: end time for collection
        :returns: metrics_list -- collected metrics with resource labels
        """
        beport_metrics = []
        for k in metrics.keys():
            vmax_key = constants.BEPORT_METRICS.get(k)
            if vmax_key:
                beport_metrics.append(vmax_key)
        director_keys = self.get_resource_keys(array, 'BEDirector')
        director_keys_dict = None
        if director_keys:
            director_keys_dict = director_keys.get('beDirectorInfo', None)
        metrics_list = []
        for director_key_dict in (director_keys_dict or []):
            payload = {'directorId': director_key_dict.get('directorId')}
            keys = self.get_resource_keys(array, 'BEPort', payload=payload)
            keys_dict = None
            if keys:
                keys_dict = keys.get('bePortInfo', None)
            for key_dict in (keys_dict or []):
                payload['portId'] = key_dict.get('portId')
                metrics_res = self.get_resource_metrics(
                    array, start_time, end_time, 'BEPort',
                    beport_metrics, payload=payload)
                if metrics_res:
                    label = {
                        'resource_id': key_dict.get('portId'),
                        'resource_name': 'BEPort_'
                            + director_key_dict.get('directorId')
                            + '_' + key_dict.get('portId'),
                        'resource_type': delfin_const.ResourceType.PORT,
                        'metrics': metrics_res
                    }
                    metrics_list.append(label)
        return metrics_list

    def get_rdfport_metrics(self, array, metrics, start_time, end_time):
        """Get RDF port metrics from the VMAX Unisphere REST API.

        :param array: the array serial number
        :param metrics: required metrics
        :param start_time: start time for collection
        :param end_time: end time for collection
        :returns: metrics_list -- collected metrics with resource labels
        """
        rdfport_metrics = []
        for k in metrics.keys():
            vmax_key = constants.RDFPORT_METRICS.get(k)
            if vmax_key:
                rdfport_metrics.append(vmax_key)
        director_keys = self.get_resource_keys(array, 'RDFDirector')
        director_keys_dict = None
        if director_keys:
            director_keys_dict = director_keys.get('rdfDirectorInfo', None)
        metrics_list = []
        for director_key_dict in (director_keys_dict or []):
            payload = {'directorId': director_key_dict.get('directorId')}
            keys = self.get_resource_keys(array, 'RDFPort', payload=payload)
            keys_dict = None
            if keys:
                keys_dict = keys.get('rdfPortInfo', None)
            for key_dict in (keys_dict or []):
                payload['portId'] = key_dict.get('portId')
                metrics_res = self.get_resource_metrics(
                    array, start_time, end_time, 'RDFPort',
                    rdfport_metrics, payload=payload)
                if metrics_res:
                    label = {
                        'resource_id': key_dict.get('portId'),
                        'resource_name': 'RDFPort_'
                            + director_key_dict.get('directorId')
                            + '_' + key_dict.get('portId'),
                        'resource_type': delfin_const.ResourceType.PORT,
                        'metrics': metrics_res
                    }
                    metrics_list.append(label)
        return metrics_list

    def get_port_metrics(self, array, metrics, start_time, end_time):
        """Get port performance metrics from the VMAX Unisphere REST API.

        :param array: the array serial number
        :param metrics: required metrics
        :param start_time: start time for collection
        :param end_time: end time for collection
        :returns: tuple -- back-end, front-end and RDF port metrics
        """
        be_metrics = self.get_beport_metrics(
            array, metrics, start_time, end_time)
        fe_metrics = self.get_feport_metrics(
            array, metrics, start_time, end_time)
        rdf_metrics = self.get_rdfport_metrics(
            array, metrics, start_time, end_time)
        return be_metrics, fe_metrics, rdf_metrics
    def get_disk_metrics(self, array, metrics, start_time, end_time):
        """Get disk performance metrics from the VMAX Unisphere REST API.

        :param array: the array serial number
        :param metrics: required metrics
        :param start_time: start time for collection
        :param end_time: end time for collection
        :returns: metrics_list -- collected metrics with resource labels
        """
        disk_metrics = []
        for k in metrics.keys():
            vmax_key = constants.DISK_METRICS.get(k)
            if vmax_key:
                disk_metrics.append(vmax_key)
        keys = self.get_resource_keys(array, 'Disk')
        keys_dict = None
        if keys:
            keys_dict = keys.get('diskInfo', None)
        metrics_list = []
        for key_dict in (keys_dict or []):
            payload = {'diskId': key_dict.get('diskId')}
            metrics_res = self.get_resource_metrics(
                array, start_time, end_time, 'Disk',
                disk_metrics, payload=payload)
            if metrics_res:
                label = {
                    'resource_id': key_dict.get('diskId'),
                    'resource_name': 'Disk_' + key_dict.get('diskId'),
                    'resource_type': delfin_const.ResourceType.DISK,
                    'metrics': metrics_res
                }
                metrics_list.append(label)
        return metrics_list

    def list_pagination(self, list_info):
        """Process lists under or over the maxPageSize.

        :param list_info: the object list information
        :returns: the result list
        """
        result_list = []
        try:
            result_list = list_info['resultList']['result']
            iterator_id = list_info['id']
            list_count = list_info['count']
            max_page_size = list_info['maxPageSize']
            start_position = list_info['resultList']['from']
            end_position = list_info['resultList']['to']
        except (KeyError, TypeError):
            return list_info
        if list_count > max_page_size:
            LOG.info("More entries exist in the result list, retrieving "
                     "remainder of results from iterator.")
            start_position = end_position + 1
            if list_count < (end_position + max_page_size):
                end_position = list_count
            else:
                end_position += max_page_size
            iterator_response = self.get_iterator_page_list(
                iterator_id, list_count, start_position, end_position,
                max_page_size)
            result_list += iterator_response
        return result_list
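    # A worked example of the paging arithmetic in list_pagination,
    # assuming maxPageSize=1000 and a result list with count=2500: the
    # first page already holds entries 1..1000, so the iterator is asked
    # for 1001..2000 and then 2001..3000, with the last window clamped to
    # count by get_iterator_page_list below:
    #
    #   first_page = response['resultList']['result']    # entries 1..1000
    #   rest = client.get_iterator_page_list(
    #       response['id'], 2500, 1001, 2000, 1000)      # entries 1001..2500
    #   full_list = first_page + rest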
    def get_iterator_page_list(self, iterator_id, result_count,
                               start_position, end_position, max_page_size):
        """Iterate through the response if more than one page is available.

        :param iterator_id: the iterator ID
        :param result_count: the amount of results in the iterator
        :param start_position: position to begin iterator from
        :param end_position: position to stop iterator
        :param max_page_size: the max page size
        :returns: list -- merged results from multiple pages
        """
        iterator_result = []
        has_more_entries = True
        while has_more_entries:
            if start_position <= result_count <= end_position:
                end_position = result_count
                has_more_entries = False
            params = {'to': end_position, 'from': start_position}
            target_uri = ('/common/Iterator/%(iterator_id)s/page' % {
                'iterator_id': iterator_id})
            iterator_response = self.get_request(target_uri, 'iterator',
                                                 params)
            try:
                iterator_result += iterator_response['result']
                start_position += max_page_size
                end_position += max_page_size
            except (KeyError, TypeError):
                pass
        return iterator_result

    def get_alerts(self, query_para, array, version):
        """Get all alerts for the given Unisphere version and array ID.

        :param query_para: contains optional begin and end time
        :param array: the array serial number
        :param version: the unisphere version
        :returns: alert_list -- list of alerts
        """
        target_uri = '/%s/system/symmetrix/%s/alert?acknowledged=false' \
                     % (version, array)

        # First get the list of all alert ids
        alert_id_list = self.get_alert_request(target_uri)
        if not alert_id_list:
            # No current alert ids found
            return []

        # For each alert id, get the details of the alert.
        # The list above is keyed by 'alertId'.
        alert_id_list = alert_id_list['alertId']
        alert_list = []
        for alert_id in alert_id_list:
            target_uri = '/%s/system/symmetrix/%s/alert/%s' \
                         % (version, array, alert_id)
            alert = self.get_alert_request(target_uri)
            if alert is not None and alert_util.is_alert_in_time_range(
                    query_para, alert['created_date_milliseconds']):
                alert_list.append(alert)
        return alert_list

    def clear_alert(self, sequence_number, array, version):
        """Clear the alert with the given sequence number.

        :param sequence_number: unique id of the alert
        :param array: the array serial number
        :param version: the unisphere version
        :returns: result -- success/failure
        """
        target_uri = '/%s/system/symmetrix/%s/alert/%s' \
                     % (version, array, sequence_number)
        status, message = self.request(target_uri, DELETE, params=None)
        if status != STATUS_204:
            raise exception.StorageClearAlertFailed(message)
        return status
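# --- Illustrative usage (not part of this module) ---
# A minimal sketch of the alert round-trip above, assuming a connected
# client; the Unisphere version, serial number and sequence number below
# are hypothetical placeholders:
#
#   alerts = client.get_alerts(query_para=None, array='000123456789',
#                              version='91')
#   for alert in alerts:
#       # each entry is the raw Unisphere alert detail dict
#       print(alert.get('created_date_milliseconds'))
#   client.clear_alert('alert-seq-1', '000123456789', '91')  # 204 on success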
================================================
FILE: delfin/drivers/dell_emc/vmax/vmax.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_log import log

from delfin.common import constants
from delfin.drivers import driver
from delfin.drivers.dell_emc.vmax import client
from delfin.drivers.dell_emc.vmax import constants as consts
from delfin.drivers.dell_emc.vmax.alert_handler import snmp_alerts
from delfin.drivers.dell_emc.vmax.alert_handler import unisphere_alerts

LOG = log.getLogger(__name__)


class VMAXStorageDriver(driver.StorageDriver):
    """VMAXStorageDriver implements the Dell EMC VMAX storage driver."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.client = client.VMAXClient(**kwargs)
        self.client.init_connection(kwargs)
        self.add_storage(kwargs)

    def delete_storage(self, context):
        self.client.array_id.pop(context.storage_id)

    def add_storage(self, kwargs):
        self.client.add_storage(kwargs)

    def reset_connection(self, context, **kwargs):
        self.client.reset_connection(**kwargs)

    def get_storage(self, context):
        storage_id = context.storage_id
        # Get the VMAX model
        array_details = self.client.get_array_details(storage_id)
        model = array_details['model']
        ucode = array_details['ucode']
        display_name = array_details['display_name']

        # Get storage details for capacity info
        total_capacity, used_capacity, free_capacity,\
            raw_capacity, subscribed_capacity = \
            self.client.get_storage_capacity(storage_id)

        storage = {
            # The Unisphere REST API does not provide an array name;
            # generate one by combining the model and symmetrixId.
            'name': display_name,
            'vendor': 'Dell EMC',
            'description': '',
            'model': model,
            'firmware_version': ucode,
            'status': constants.StorageStatus.NORMAL,
            'serial_number': self.client.array_id[storage_id],
            'location': '',
            'total_capacity': total_capacity,
            'used_capacity': used_capacity,
            'free_capacity': free_capacity,
            'raw_capacity': raw_capacity,
            'subscribed_capacity': subscribed_capacity
        }
        LOG.info("get_storage(), successfully retrieved storage details")
        return storage

    def list_storage_pools(self, context):
        return self.client.list_storage_pools(context.storage_id)

    def list_volumes(self, context):
        return self.client.list_volumes(context.storage_id)

    def list_controllers(self, context):
        return self.client.list_controllers(context.storage_id)

    def list_ports(self, context):
        return self.client.list_ports(context.storage_id)

    def list_disks(self, context):
        return self.client.list_disks(context.storage_id)

    def list_storage_host_initiators(self, context):
        return self.client.list_storage_host_initiators(context.storage_id)

    def list_storage_hosts(self, context):
        return self.client.list_storage_hosts(context.storage_id)

    def list_storage_host_groups(self, context):
        return self.client.list_storage_host_groups(context.storage_id)

    def list_port_groups(self, context):
        return self.client.list_port_groups(context.storage_id)

    def list_volume_groups(self, context):
        return self.client.list_volume_groups(context.storage_id)

    def list_masking_views(self, context):
        return self.client.list_masking_views(context.storage_id)

    def add_trap_config(self, context, trap_config):
        pass

    def remove_trap_config(self, context, trap_config):
        pass

    @staticmethod
    def parse_alert(context, alert):
        return snmp_alerts.AlertHandler().parse_alert(context, alert)

    def clear_alert(self, context, sequence_number):
        return self.client.clear_alert(context.storage_id, sequence_number)
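    # --- Illustrative usage (not part of the driver source) ---
    # A minimal sketch of how this class is typically driven, with the
    # access info in `kwargs` (rest host, port, username, password) and
    # the `context` object being hypothetical placeholders supplied by
    # delfin:
    #
    #   driver = VMAXStorageDriver(**kwargs)
    #   storage = driver.get_storage(context)
    #   pools = driver.list_storage_pools(context)
    #   volumes = driver.list_volumes(context)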
    def list_alerts(self, context, query_para):
        # Alerts come from two sources:
        # 1. CM generated snmp_alerts
        # 2. SNMP Trap forwarder (specific 3rd IP)
        alert_list = self.client.list_alerts(context.storage_id, query_para)
        alert_model_list = unisphere_alerts.AlertHandler()\
            .parse_queried_alerts(alert_list)
        return alert_model_list

    def collect_perf_metrics(self, context, storage_id,
                             resource_metrics, start_time, end_time):
        metrics = []
        try:
            # storage metrics
            if resource_metrics.get(constants.ResourceType.STORAGE):
                storage_metrics = self.client.get_storage_metrics(
                    storage_id,
                    resource_metrics.get(constants.ResourceType.STORAGE),
                    start_time, end_time)
                metrics.extend(storage_metrics)
            # storage-pool metrics
            if resource_metrics.get(constants.ResourceType.STORAGE_POOL):
                pool_metrics = self.client.get_pool_metrics(
                    storage_id,
                    resource_metrics.get(
                        constants.ResourceType.STORAGE_POOL),
                    start_time, end_time)
                metrics.extend(pool_metrics)
            # controller metrics
            if resource_metrics.get(constants.ResourceType.CONTROLLER):
                controller_metrics = self.client.get_controller_metrics(
                    storage_id,
                    resource_metrics.get(constants.ResourceType.CONTROLLER),
                    start_time, end_time)
                metrics.extend(controller_metrics)
            # port metrics
            if resource_metrics.get(constants.ResourceType.PORT):
                port_metrics = self.client.get_port_metrics(
                    storage_id,
                    resource_metrics.get(constants.ResourceType.PORT),
                    start_time, end_time)
                metrics.extend(port_metrics)
            # disk metrics
            if resource_metrics.get(constants.ResourceType.DISK):
                disk_metrics = self.client.get_disk_metrics(
                    storage_id,
                    resource_metrics.get(constants.ResourceType.DISK),
                    start_time, end_time)
                metrics.extend(disk_metrics)
        except Exception:
            LOG.error("Failed to collect metrics from VMAX")
            raise
        return metrics

    @staticmethod
    def get_capabilities(context, filters=None):
        """Get the capabilities supported by this driver."""
        return {
            'is_historic': True,
            'resource_metrics': {
                constants.ResourceType.STORAGE: consts.STORAGE_CAP,
                constants.ResourceType.STORAGE_POOL: consts.POOL_CAP,
                constants.ResourceType.CONTROLLER: consts.CONTROLLER_CAP,
                constants.ResourceType.PORT: consts.PORT_CAP,
                constants.ResourceType.DISK: consts.DISK_CAP,
            }
        }
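# --- Illustrative usage (not part of this module) ---
# A caller can intersect the capability map above with a user-requested
# metric map before invoking collect_perf_metrics; `requested` and
# `driver` here are hypothetical:
#
#   caps = VMAXStorageDriver.get_capabilities(context)
#   supported = caps['resource_metrics']
#   resource_metrics = {
#       rtype: {m: v for m, v in wanted.items() if m in supported[rtype]}
#       for rtype, wanted in requested.items() if rtype in supported
#   }
#   metrics = driver.collect_perf_metrics(context, storage_id,
#                                         resource_metrics,
#                                         start_time, end_time)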
================================================
FILE: delfin/drivers/dell_emc/vnx/__init__.py
================================================


================================================
FILE: delfin/drivers/dell_emc/vnx/vnx_block/__init__.py
================================================


================================================
FILE: delfin/drivers/dell_emc/vnx/vnx_block/alert_handler.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import hashlib

from oslo_log import log as logging

from delfin import exception, utils
from delfin.common import constants
from delfin.drivers.dell_emc.vnx.vnx_block import consts
from delfin.i18n import _

LOG = logging.getLogger(__name__)


class AlertHandler(object):

    @staticmethod
    def parse_alert(alert):
        try:
            alert_model = dict()
            alert_model['alert_id'] = AlertHandler.check_event_code(
                alert.get(consts.OID_MESSAGECODE))
            alert_model['alert_name'] = alert.get(consts.OID_DETAILS)
            alert_model['severity'] = consts.TRAP_LEVEL_MAP.get(
                alert.get(consts.OID_SEVERITY),
                constants.Severity.INFORMATIONAL)
            alert_model['category'] = constants.Category.FAULT
            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
            alert_model['occur_time'] = utils.utcnow_ms()
            alert_model['description'] = alert.get(consts.OID_DETAILS)
            alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE
            alert_model['match_key'] = hashlib.md5(
                alert.get(consts.OID_DETAILS, '').encode()).hexdigest()
            return alert_model
        except Exception as e:
            LOG.error(e)
            msg = (_("Failed to build alert model because some attributes "
                     "are missing in the alert message."))
            raise exception.InvalidResults(msg)

    @staticmethod
    def check_event_code(event_code):
        if '0x' not in event_code:
            event_code = '0x%s' % event_code
        return event_code


================================================
FILE: delfin/drivers/dell_emc/vnx/vnx_block/component_handler.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
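# Note on the capacity units used below: naviseccli reports pool
# capacities in GiB (converted with units.Gi), but RAID-group capacities
# in 512-byte blocks, which is why handle_raid_groups multiplies by
# (units.Ki / 2). A minimal sketch of that conversion:
#
#   from oslo_utils import units
#   blocks = 2097152                               # example block count
#   capacity_bytes = int(blocks * (units.Ki / 2))  # blocks * 512 bytes
#   assert capacity_bytes == blocks * 512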
import copy import csv import os import re import time import six from oslo_log import log from oslo_utils import units from delfin import exception from delfin.common import constants from delfin.drivers.dell_emc.vnx.vnx_block import consts from delfin.drivers.utils.tools import Tools LOG = log.getLogger(__name__) class ComponentHandler(object): def __init__(self, navi_handler): self.navi_handler = navi_handler def get_storage(self): domain = self.navi_handler.get_domain() agent = self.navi_handler.get_agent() status = constants.StorageStatus.NORMAL raw_cap = self.handle_disk_capacity() pool_capacity = self.handle_pool_capacity() if domain and agent and pool_capacity: result = { 'name': domain[0].get('node'), 'vendor': consts.EMCVNX_VENDOR, 'model': agent.get('model'), 'status': status, 'serial_number': agent.get('serial_no'), 'firmware_version': agent.get('revision'), 'total_capacity': pool_capacity.get('total_capacity'), 'raw_capacity': int(raw_cap), 'used_capacity': pool_capacity.get('used_capacity'), 'free_capacity': pool_capacity.get('free_capacity') } else: err_msg = "Get vnx storage info failed, domain: %s, agent: %s," \ " pool_capacity: %s" % (six.text_type(domain), six.text_type(agent), six.text_type(pool_capacity)) LOG.error(err_msg) raise exception.StorageBackendException(err_msg) return result def list_storage_pools(self, storage_id): pools = self.navi_handler.get_pools() pool_list = [] if pools: for pool in pools: if pool.get('pool_name') is not None: status = consts.STATUS_MAP.get( pool.get('state'), constants.StoragePoolStatus.OFFLINE) used_cap = float( pool.get("consumed_capacity_gbs")) * units.Gi free_cap = float( pool.get("available_capacity_gbs")) * units.Gi total_cap = float( pool.get("user_capacity_gbs")) * units.Gi subscribed_cap = float(pool.get( "total_subscribed_capacity_gbs")) * units.Gi p = { 'name': pool.get('pool_name'), 'storage_id': storage_id, 'native_storage_pool_id': str(pool.get('pool_id')), 'description': pool.get('description'), 'status': status, 'storage_type': constants.StorageType.BLOCK, 'total_capacity': int(total_cap), 'subscribed_capacity': int(subscribed_cap), 'used_capacity': int(used_cap), 'free_capacity': int(free_cap) } pool_list.append(p) raid_groups = self.handle_raid_groups(storage_id) if raid_groups: pool_list.extend(raid_groups) return pool_list def handle_raid_groups(self, storage_id): raid_groups = self.navi_handler.get_raid_group() raid_list = [] if raid_groups: for raid in raid_groups: if raid.get('raidgroup_id') is not None: status = consts.STATUS_MAP.get( raid.get('raidgroup_state'), constants.StoragePoolStatus.OFFLINE) free_cap = float(raid.get( "free_capacity_blocks,non-contiguous")) total_cap = float( raid.get("logical_capacity_blocks")) used_cap = total_cap - free_cap p = { 'name': 'RAID Group %s' % raid.get('raidgroup_id'), 'storage_id': storage_id, 'native_storage_pool_id': '%s%s' % ( consts.RAID_GROUP_ID_PREFIX, raid.get('raidgroup_id')), 'status': status, 'storage_type': constants.StorageType.BLOCK, 'total_capacity': int(total_cap * (units.Ki / 2)), 'used_capacity': int(used_cap * (units.Ki / 2)), 'free_capacity': int(free_cap * (units.Ki / 2)) } raid_list.append(p) return raid_list def handle_volume_from_pool(self, volumes, pool_ids, storage_id): volume_list = [] if volumes: for volume in volumes: if volume.get('name') is not None: status = consts.STATUS_MAP.get( volume.get('current_state'), constants.StoragePoolStatus.OFFLINE) orig_pool_name = volume.get('pool_name') vol_type = consts.VOL_TYPE_MAP.get( 
volume.get('is_thin_lun').lower()) volume_used_cap_str = volume.get('consumed_capacity_gbs') used_cap = 0 if volume_used_cap_str and volume_used_cap_str != 'N/A': used_cap = float(volume_used_cap_str) * units.Gi total_cap = float( volume.get('user_capacity_gbs')) * units.Gi free_cap = total_cap - used_cap if free_cap < 0: free_cap = 0 v = { 'name': volume.get('name'), 'storage_id': storage_id, 'status': status, 'native_volume_id': str(volume.get('lun_id')), 'native_storage_pool_id': pool_ids.get(orig_pool_name, ''), 'type': vol_type, 'total_capacity': int(total_cap), 'used_capacity': int(used_cap), 'free_capacity': int(free_cap), 'compressed': consts.VOL_COMPRESSED_MAP.get( volume.get('is_compressed').lower()), 'wwn': volume.get('uid') } volume_list.append(v) return volume_list def handle_volume_from_raid_group(self, storage_id): volume_list = [] volumes = self.navi_handler.get_all_lun() if volumes: for volume in volumes: if volume.get('raidgroup_id') and ( volume.get('raidgroup_id') != 'N/A' or volume.get( 'is_meta_lun') == 'YES'): pool_id = None if volume.get('raidgroup_id') != 'N/A': pool_id = '%s%s' % (consts.RAID_GROUP_ID_PREFIX, volume.get('raidgroup_id')) status = consts.STATUS_MAP.get( volume.get('state'), constants.StoragePoolStatus.OFFLINE) vol_type = consts.VOL_TYPE_MAP.get( volume.get('is_thin_lun').lower()) total_cap = float( volume.get('lun_capacitymegabytes')) * units.Mi used_cap = total_cap free_cap = 0 v = { 'name': volume.get('name'), 'storage_id': storage_id, 'status': status, 'native_volume_id': str( volume.get('logical_unit_number')), 'native_storage_pool_id': pool_id, 'type': vol_type, 'total_capacity': int(total_cap), 'used_capacity': int(used_cap), 'free_capacity': int(free_cap), 'wwn': volume.get('uid') } volume_list.append(v) return volume_list def list_volumes(self, storage_id): volumes = self.navi_handler.get_pool_lun() pools = self.navi_handler.get_pools() pool_ids = {} if pools: for pool in pools: if pool.get('pool_name') is not None: pool_ids[pool.get('pool_name')] = pool.get('pool_id') volume_list = self.handle_volume_from_pool(volumes, pool_ids, storage_id) raid_volumes = self.handle_volume_from_raid_group(storage_id) if raid_volumes: volume_list.extend(raid_volumes) return volume_list def handle_disk_capacity(self): disks = self.navi_handler.get_disks() raw_capacity = 0 if disks: for disk in disks: if disk.get('disk_id') is not None: capacity = float(disk.get("capacity", 0)) raw_capacity += capacity return raw_capacity * units.Mi def handle_pool_capacity(self): pools = self.list_storage_pools(None) total_capacity = 0 free_capacity = 0 used_capacity = 0 obj_model = None if pools: for pool in pools: total_capacity += pool.get("total_capacity") free_capacity += pool.get("free_capacity") used_capacity += pool.get("used_capacity") obj_model = { 'total_capacity': total_capacity, 'free_capacity': free_capacity, 'used_capacity': used_capacity } return obj_model def list_disks(self, storage_id): disks = self.navi_handler.get_disks() disk_list = [] for disk in (disks or []): if disk.get('disk_id'): status = consts.DISK_STATUS_MAP.get( disk.get('state', '').upper(), constants.DiskStatus.ABNORMAL) capacity = int(float(disk.get("capacity", 0)) * units.Mi) logical_type = constants.DiskLogicalType.UNKNOWN hot_spare = disk.get('hot_spare', '') if hot_spare and hot_spare != 'N/A': logical_type = constants.DiskLogicalType.HOTSPARE disk_name = disk.get('disk_name') disk_name = ' '.join(disk_name.strip().split()) disk_model = { 'name': disk_name, 'storage_id': storage_id, 
'native_disk_id': disk.get('disk_id'), 'serial_number': disk.get('serial_number'), 'manufacturer': disk.get('vendor_id'), 'model': disk.get('product_id'), 'firmware': disk.get('product_revision'), 'speed': None, 'capacity': capacity, 'status': status, 'physical_type': consts.DISK_PHYSICAL_TYPE_MAP.get( disk.get('drive_type', '').upper(), constants.DiskPhysicalType.UNKNOWN), 'logical_type': logical_type, 'health_score': None, 'native_disk_group_id': None, 'location': disk_name } disk_list.append(disk_model) return disk_list def analyse_speed(self, speed_value): speed = 0 try: speeds = re.findall("\\d+", speed_value) if speeds: speed = int(speeds[0]) if 'Gbps' in speed_value: speed = speed * units.G elif 'Mbps' in speed_value: speed = speed * units.M elif 'Kbps' in speed_value: speed = speed * units.k except Exception as err: err_msg = "analyse speed error: %s" % (six.text_type(err)) LOG.error(err_msg) return speed def list_controllers(self, storage_id): controllers = self.navi_handler.get_controllers() cpus = self.navi_handler.get_cpus() controller_list = [] for controller in (controllers or []): memory_size = int(controller.get('memory_size_for_the_sp', '0')) * units.Mi cpu_info = '' cpu_count = None if cpus: cpu_info = cpus.get( controller.get('serial_number_for_the_sp', ''), '') if cpu_info: cpu_count = 1 controller_model = { 'name': controller.get('sp_name'), 'storage_id': storage_id, 'native_controller_id': controller.get('signature_for_the_sp'), 'status': constants.ControllerStatus.NORMAL, 'location': None, 'soft_version': controller.get('revision_number_for_the_sp'), 'cpu_info': cpu_info, 'cpu_count': cpu_count, 'memory_size': str(memory_size) } controller_list.append(controller_model) return controller_list def list_ports(self, storage_id): port_list = [] io_configs = self.navi_handler.get_io_configs() iscsi_port_map = self.get_iscsi_ports() ports = self.get_ports(storage_id, io_configs, iscsi_port_map) port_list.extend(ports) bus_ports = self.get_bus_ports(storage_id, io_configs) port_list.extend(bus_ports) return port_list def get_ports(self, storage_id, io_configs, iscsi_port_map): ports = self.navi_handler.get_ports() port_list = [] for port in (ports or []): port_id = port.get('sp_port_id') sp_name = port.get('sp_name').replace('SP ', '') name = '%s-%s' % (sp_name, port_id) location = 'Slot %s%s,Port %s' % ( sp_name, port.get('i/o_module_slot'), port.get('physical_port_id')) mac_address = port.get('mac_address') if mac_address == 'Not Applicable': mac_address = None module_key = '%s_%s' % ( sp_name, port.get('i/o_module_slot')) type = '' if io_configs: type = io_configs.get(module_key, '') ipv4 = None ipv4_mask = None if iscsi_port_map: iscsi_port = iscsi_port_map.get(name) if iscsi_port: ipv4 = iscsi_port.get('ip_address') ipv4_mask = iscsi_port.get('subnet_mask') port_model = { 'name': location, 'storage_id': storage_id, 'native_port_id': name, 'location': location, 'connection_status': consts.PORT_CONNECTION_STATUS_MAP.get( port.get('link_status', '').upper(), constants.PortConnectionStatus.UNKNOWN), 'health_status': consts.PORT_HEALTH_STATUS_MAP.get( port.get('port_status', '').upper(), constants.PortHealthStatus.UNKNOWN), 'type': consts.PORT_TYPE_MAP.get( type.upper(), constants.PortType.OTHER), 'logical_type': None, 'speed': self.analyse_speed( port.get('speed_value', '')), 'max_speed': self.analyse_speed( port.get('max_speed', '')), 'native_parent_id': None, 'wwn': port.get('sp_uid'), 'mac_address': mac_address, 'ipv4': ipv4, 'ipv4_mask': ipv4_mask, 'ipv6': None, 
'ipv6_mask': None, } port_list.append(port_model) return port_list def get_bus_ports(self, storage_id, io_configs): bus_ports = self.navi_handler.get_bus_ports() port_list = [] if bus_ports: bus_port_state_map = self.navi_handler.get_bus_port_state() for bus_port in bus_ports: sps = bus_port.get('sps') for sp in (sps or []): sp_name = sp.replace('sp', '').upper() location = '%s %s,Port %s' % ( bus_port.get('i/o_module_slot'), sp_name, bus_port.get('physical_port_id')) native_port_id = location.replace(' ', '') native_port_id = native_port_id.replace(',', '') module_key = '%s_%s' % ( sp_name, bus_port.get('i/o_module_slot')) type = '' if io_configs: type = io_configs.get(module_key, '') state = '' if bus_port_state_map: port_state_key = '%s_%s' % ( sp_name, bus_port.get('physical_port_id')) state = bus_port_state_map.get(port_state_key, '') port_model = { 'name': location, 'storage_id': storage_id, 'native_port_id': native_port_id, 'location': location, 'connection_status': constants.PortConnectionStatus.UNKNOWN, 'health_status': consts.PORT_HEALTH_STATUS_MAP.get( state.upper(), constants.PortHealthStatus.UNKNOWN), 'type': consts.PORT_TYPE_MAP.get( type.upper(), constants.PortType.OTHER), 'logical_type': None, 'speed': self.analyse_speed( bus_port.get('current_speed', '')), 'max_speed': self.analyse_speed( bus_port.get('max_speed', '')), 'native_parent_id': None, 'wwn': None, 'mac_address': None, 'ipv4': None, 'ipv4_mask': None, 'ipv6': None, 'ipv6_mask': None, } port_list.append(port_model) return port_list def get_iscsi_ports(self): iscsi_port_map = {} iscsi_ports = self.navi_handler.get_iscsi_ports() for iscsi_port in (iscsi_ports or []): name = '%s-%s' % (iscsi_port.get('sp'), iscsi_port.get('port_id')) iscsi_port_map[name] = iscsi_port return iscsi_port_map def list_masking_views(self, storage_id): views = self.navi_handler.list_masking_views() views_list = [] host_vv_set = set() if views: for view in views: name = view.get('storage_group_name') host_names = view.get('host_names') lun_ids = view.get('lun_ids') if name: if name == '~physical' or name == '~management': continue view_model_template = { 'native_masking_view_id': view.get( 'storage_group_uid'), "name": view.get('storage_group_name'), "storage_id": storage_id } if host_names and lun_ids: host_names = list(set(host_names)) for host_name in host_names: host_id = host_name.replace(' ', '') for lun_id in lun_ids: host_vv_key = '%s_%s' % (host_id, lun_id) if host_vv_key in host_vv_set: continue host_vv_set.add(host_vv_key) view_model = copy.deepcopy(view_model_template) view_model[ 'native_storage_host_id'] = host_id view_model['native_volume_id'] = lun_id view_model[ 'native_masking_view_id'] = '%s_%s_%s' % ( view_model.get('native_masking_view_id'), host_id, lun_id) views_list.append(view_model) return views_list def list_storage_host_initiators(self, storage_id): initiators = self.navi_handler.list_hbas() initiators_list = [] initiator_set = set() port_types = {} if initiators: ports = self.list_ports(storage_id) for port in (ports or []): if port and port.get('type'): port_types[port.get('name')] = port.get('type') for initiator in (initiators or []): if initiator and initiator.get('hba_uid'): hba_uid = initiator.get('hba_uid') type = '' if port_types: ports = initiator.get('port_ids') if ports: port_id = list(ports)[0] type = port_types.get(port_id, '') host_id = initiator.get('server_name', '').replace(' ', '') if host_id == hba_uid: host_id = None if not host_id: continue if hba_uid in initiator_set: continue 
initiator_set.add(hba_uid) initiator_model = { "name": hba_uid, "storage_id": storage_id, "native_storage_host_initiator_id": hba_uid, "wwn": hba_uid, "type": consts.INITIATOR_TYPE_MAP.get( type.upper(), constants.InitiatorType.UNKNOWN), "status": constants.InitiatorStatus.ONLINE, "native_storage_host_id": host_id } initiators_list.append(initiator_model) return initiators_list def list_storage_hosts(self, storage_id): hosts = self.navi_handler.list_hbas() host_list = [] host_ids = set() host_ips = {} for host in (hosts or []): if host and host.get('server_name'): os_type = constants.HostOSTypes.UNKNOWN os_name = host.get('hba_vendor_description') ip_addr = host.get('server_ip_address') if ip_addr == 'UNKNOWN': continue if os_name and 'VMware ESXi' in os_name: os_type = constants.HostOSTypes.VMWARE_ESX id = host.get('server_name').replace(' ', '') if id in host_ids: continue host_ids.add(id) if ip_addr in host_ips.keys(): first_port_ids = host_ips.get(ip_addr) cur_port_ids = host.get('port_ids') add_host = False intersections = list( set(first_port_ids).intersection(set(cur_port_ids))) if not intersections: add_host = True if not add_host: continue host_ips[ip_addr] = host.get('port_ids') host_model = { "name": host.get('server_name'), "storage_id": storage_id, "native_storage_host_id": id, "os_type": os_type, "status": constants.HostStatus.NORMAL, "ip_address": ip_addr } host_list.append(host_model) return host_list def collect_perf_metrics(self, storage_id, resource_metrics, start_time, end_time): metrics = [] archive_file_list = [] try: LOG.info("Start collection, storage:%s, start time:%s, end time:%s" % (storage_id, start_time, end_time)) archive_file_list = self._get__archive_file(start_time, end_time) LOG.info("Get archive files: {}".format(archive_file_list)) if not archive_file_list: LOG.warning("The required performance file was not found!") return metrics resources_map, resources_type_map = self._get_resources_map( resource_metrics) if not resources_map or not resources_type_map: LOG.warning("Resource object not found!") return metrics performance_lines_map = self._filter_performance_data( archive_file_list, resources_map, start_time, end_time) if not performance_lines_map: LOG.warning("The required performance data was not found!") return metrics metrics = self.create_metrics(storage_id, resource_metrics, resources_map, resources_type_map, performance_lines_map) LOG.info("Collection complete, storage:%s, start time:%s, " "end time:%s, length of metrics:%s " % (storage_id, start_time, end_time, len(metrics))) except exception.DelfinException as err: err_msg = "Failed to collect metrics from VnxBlockStor: %s" % \ (six.text_type(err)) LOG.error(err_msg) raise err except Exception as err: err_msg = "Failed to collect metrics from VnxBlockStor: %s" % \ (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) finally: self._remove_archive_file(archive_file_list) return metrics def create_metrics(self, storage_id, resource_metrics, resources_map, resources_type_map, performance_lines_map): metrics = [] for resource_obj, resource_type in resources_type_map.items(): if not resources_map.get(resource_obj) \ or not resource_type: continue if not performance_lines_map.get(resource_obj): continue labels = { 'storage_id': storage_id, 'resource_type': resource_type, 'resource_id': resources_map.get(resource_obj), 'type': 'RAW', 'unit': '' } metric_model_list = self._get_metric_model( resource_metrics.get(resource_type), labels, performance_lines_map.get(resource_obj), 
consts.RESOURCES_TYPE_TO_METRIC_CAP.get(resource_type), resource_type) if metric_model_list: metrics.extend(metric_model_list) return metrics def _get__archive_file(self, start_time, end_time): archive_file_list = [] archives = self.navi_handler.get_archives() tools = Tools() for archive_info in (archives or []): collection_timestamp = tools.time_str_to_timestamp( archive_info.get('collection_time'), consts.TIME_PATTERN) if collection_timestamp > start_time: archive_file_list.append(archive_info.get('archive_name')) if collection_timestamp > end_time: break return archive_file_list def _get_metric_model(self, metric_list, labels, metric_values, obj_cap, resources_type): metric_model_list = [] tools = Tools() for metric_name in (metric_list or []): values = {} obj_labels = copy.copy(labels) obj_labels['unit'] = obj_cap.get(metric_name).get('unit') for metric_value in metric_values: metric_value_infos = metric_value if not consts.METRIC_MAP.get(resources_type, {}).get( metric_name): continue value = metric_value_infos[ consts.METRIC_MAP.get(resources_type).get(metric_name)] if not value: value = '0' collection_timestamp = tools.time_str_to_timestamp( metric_value_infos[1], consts.TIME_PATTERN) collection_time_str = tools.timestamp_to_time_str( collection_timestamp, consts.COLLECTION_TIME_PATTERN) collection_timestamp = tools.time_str_to_timestamp( collection_time_str, consts.COLLECTION_TIME_PATTERN) if "iops" == obj_cap.get(metric_name).get('unit').lower(): value = int(float(value)) else: value = float('%.6f' % (float(value))) values[collection_timestamp] = value if values: metric_model = constants.metric_struct(name=metric_name, labels=obj_labels, values=values) metric_model_list.append(metric_model) return metric_model_list def _get_resources_map(self, resource_metrics): resources_map = {} resources_type_map = {} for resource_type_key in resource_metrics.keys(): sub_resources_map = {} sub_resources_type_map = {} if resource_type_key == constants.ResourceType.CONTROLLER: sub_resources_map, sub_resources_type_map = \ self._get_controllers_map() elif resource_type_key == constants.ResourceType.PORT: sub_resources_map, sub_resources_type_map = \ self._get_ports_map() elif resource_type_key == constants.ResourceType.DISK: sub_resources_map, sub_resources_type_map = \ self._get_disks_map() elif resource_type_key == constants.ResourceType.VOLUME: sub_resources_map, sub_resources_type_map = \ self._get_volumes_map() if sub_resources_map and sub_resources_type_map: resources_map.update(sub_resources_map) resources_type_map.update(sub_resources_type_map) return resources_map, resources_type_map def _get_controllers_map(self): resources_map = {} resources_type_map = {} controllers = self.navi_handler.get_controllers() for controller in (controllers or []): resources_map[controller.get('sp_name')] = controller.get( 'signature_for_the_sp') resources_type_map[controller.get('sp_name')] = \ constants.ResourceType.CONTROLLER return resources_map, resources_type_map def _get_ports_map(self): resources_map = {} resources_type_map = {} ports = self.navi_handler.get_ports() for port in (ports or []): port_id = port.get('sp_port_id') sp_name = port.get('sp_name').replace('SP ', '') name = '%s-%s' % (sp_name, port_id) port_id = 'Port %s [ %s ]' % (port_id, port.get('sp_uid')) resources_map[port_id] = name resources_type_map[port_id] = constants.ResourceType.PORT return resources_map, resources_type_map def _get_disks_map(self): resources_map = {} resources_type_map = {} disks = self.navi_handler.get_disks() for 
disk in (disks or []): disk_name = disk.get('disk_name') disk_name = ' '.join(disk_name.strip().split()) resources_map[disk_name] = disk.get('disk_id') resources_type_map[disk_name] = constants.ResourceType.DISK return resources_map, resources_type_map def _get_volumes_map(self): resources_map = {} resources_type_map = {} volumes = self.navi_handler.get_all_lun() for volume in (volumes or []): if not volume.get('name'): continue volume_name = '%s [%s]' % ( volume.get('name'), volume.get('logical_unit_number')) resources_map[volume_name] = str(volume.get('logical_unit_number')) resources_type_map[volume_name] = constants.ResourceType.VOLUME return resources_map, resources_type_map def _filter_performance_data(self, archive_file_list, resources_map, start_time, end_time): performance_lines_map = {} try: tools = Tools() for archive_file in archive_file_list: self.navi_handler.download_archives(archive_file) archive_name_infos = archive_file.split('.') file_path = '%s%s.csv' % ( self.navi_handler.get_local_file_path(), archive_name_infos[0]) with open(file_path) as file: f_csv = csv.reader(file) next(f_csv) for row in f_csv: self._package_performance_data(row, resources_map, start_time, end_time, tools, performance_lines_map) except Exception as err: err_msg = "Failed to filter performance data: %s" % \ (six.text_type(err)) LOG.error(err_msg) raise exception.StorageBackendException(err_msg) return performance_lines_map def _package_performance_data(self, row, resources_map, start_time, end_time, tools, performance_lines_map): resource_obj_name = row[0] resource_obj_name = self._package_resource_obj_name(resource_obj_name) if resource_obj_name in resources_map: obj_collection_timestamp = tools.time_str_to_timestamp( row[1], consts.TIME_PATTERN) if (start_time + consts.TIME_INTERVAL_FLUCTUATION) \ <= obj_collection_timestamp \ and obj_collection_timestamp \ <= (end_time + consts.TIME_INTERVAL_FLUCTUATION): performance_lines_map.setdefault(resource_obj_name, []).append( row) def _package_resource_obj_name(self, source_name): target_name = source_name if 'Port ' in target_name: return re.sub(r'(\[.*;)', '[', target_name) elif '; ' in target_name: return re.sub(r'(; .*])', ']', target_name) return target_name def _remove_archive_file(self, archive_file_list): try: for archive_file in archive_file_list: nar_file_path = '%s%s' % ( self.navi_handler.get_local_file_path(), archive_file) archive_name_infos = archive_file.split('.') csv_file_path = '%s%s.csv' % ( self.navi_handler.get_local_file_path(), archive_name_infos[0]) for file_path in [nar_file_path, csv_file_path]: LOG.info("Delete file :{}".format(file_path)) if os.path.exists(file_path): os.remove(file_path) else: err_msg = 'no such file:%s' % file_path LOG.error(err_msg) raise exception.StorageBackendException(err_msg) except Exception as err: err_msg = "Failed to remove archive file: %s" % \ (six.text_type(err)) LOG.error(err_msg) raise exception.StorageBackendException(err_msg) def get_latest_perf_timestamp(self, storage_id): latest_time = 0 num = 0 tools = Tools() while latest_time <= 0: num += 1 latest_time, file_latest_time = self.check_latest_timestamp( storage_id) if num > consts.EXEC_MAX_NUM: latest_time = file_latest_time LOG.warning("Storage:{}, Exit after {} executions.".format( storage_id, consts.EXEC_MAX_NUM)) break if latest_time <= 0: wait_time = tools.timestamp_to_time_str( time.time() * units.k, consts.ARCHIVE_FILE_NAME_TIME_PATTERN) LOG.warning("Storage:{} No new file found, " "wait for next execution:{}".format(storage_id, 
                    wait_time))
                time.sleep(consts.SLEEP_TIME_SECONDS)
        return latest_time

    def get_data_latest_timestamp(self, storage_id):
        archive_file_list = []
        try:
            tools = Tools()
            archive_name = self.navi_handler.create_archives(storage_id)
            LOG.info("Create archive_name: {}".format(archive_name))
            archive_file_list.append(archive_name)
            archive_name_infos = archive_name.split('.')
            file_path = '%s%s.csv' % (
                self.navi_handler.get_local_file_path(),
                archive_name_infos[0])
            resource_obj_name = ''
            collection_time = ''
            with open(file_path) as file:
                f_csv = csv.reader(file)
                next(f_csv)
                for row in f_csv:
                    if not resource_obj_name or resource_obj_name == row[0]:
                        resource_obj_name = row[0]
                        collection_time = row[1]
                    else:
                        break
            latest_time = tools.time_str_to_timestamp(
                collection_time, consts.TIME_PATTERN)
        except Exception as err:
            err_msg = "Failed to get latest perf timestamp " \
                      "from VnxBlockStor: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        finally:
            self._remove_archive_file(archive_file_list)
        return latest_time

    def check_latest_timestamp(self, storage_id):
        latest_time = 0
        file_latest_time = self.get_data_latest_timestamp(storage_id)
        sys_time = self.navi_handler.get_sp_time()
        LOG.info("Get sys_time=={}, file_latest_time=={}".format(
            sys_time, file_latest_time))
        if sys_time > 0 and file_latest_time > 0:
            LOG.info("(sys_time - file_latest_time)={}".format(
                (sys_time - file_latest_time)))
            if (sys_time - file_latest_time) < \
                    consts.CREATE_FILE_TIME_INTERVAL:
                latest_time = file_latest_time
        time.sleep(consts.CHECK_WAITE_TIME_SECONDS)
        return latest_time, file_latest_time


================================================
FILE: delfin/drivers/dell_emc/vnx/vnx_block/consts.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
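# The EXCEPTION_MAP below pairs known naviseccli error strings with delfin
# exception classes. A consumer of the map might match CLI output against
# it roughly like this (an illustrative sketch, not the actual caller):
#
#   def raise_for_cli_error(output):
#       for err_text, exc_cls in EXCEPTION_MAP.items():
#           if err_text in output:
#               raise exc_cls(output)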
from delfin import exception from delfin.common import constants from delfin.common.constants import ControllerMetric, DiskMetric, PortMetric, \ VolumeMetric SOCKET_TIMEOUT = 30 LOGIN_SOCKET_TIMEOUT = 10 CER_ERR = 'Unable to validate the identity of the server' CALLER_ERR = 'Caller not privileged' SECURITY_ERR = 'Security file not found' TRYING_CONNECT_ERR = 'error occurred while trying to connect' CONNECTION_ERR = 'connection refused' INVALID_ERR = 'invalid username, password and/or scope' NOT_SUPPORTED_ERR = 'CLI commands are not supported by the target storage' \ ' system' EXCEPTION_MAP = {CER_ERR: exception.SSLCertificateFailed, CALLER_ERR: exception.InvalidUsernameOrPassword, SECURITY_ERR: exception.InvalidUsernameOrPassword, TRYING_CONNECT_ERR: exception.InvalidIpOrPort, CONNECTION_ERR: exception.InvalidIpOrPort, INVALID_ERR: exception.InvalidUsernameOrPassword, NOT_SUPPORTED_ERR: exception.StorageBackendException} CER_STORE = '2' CER_REJECT = '3' DISK_ID_KEY = 'Bus 0 Enclosure 0 Disk' LUN_ID_KEY = 'LOGICAL UNIT NUMBER' LUN_NAME_KEY = 'Name ' CER_SEPARATE_KEY = '-----------------------------' TIME_PATTERN = '%m/%d/%Y %H:%M:%S' DATE_PATTERN = '%m/%d/%Y' ONE_DAY_SCE = 24 * 60 * 60 LOG_FILTER_PATTERN = '\\(7[0-7]([a-f]|[0-9]){2}\\)' NAVISECCLI_API = 'naviseccli -User %(username)s -password %(password)s' \ ' -scope 0 -t %(timeout)d -h %(host)s' CER_ADD_API = 'naviseccli security -certificate -add -file' CER_LIST_API = 'naviseccli security -certificate -list' CER_REMOVE_API = 'naviseccli security -certificate -remove' GET_AGENT_API = 'getagent' GET_DOMAIN_API = 'domain -list' GET_STORAGEPOOL_API = 'storagepool -list' GET_RAIDGROUP_API = 'getrg' GET_DISK_API = 'getdisk' GET_LUN_API = 'lun -list' GET_GETALLLUN_API = 'getall -lun' GET_SP_API = 'getsp' GET_PORT_API = 'port -list -sp -all' GET_BUS_PORT_API = 'backendbus -get -all' GET_BUS_PORT_STATE_API = 'ioportconfig -list -iomodule basemodule' \ ' -portstate -pportid' GET_ISCSI_PORT_API = 'connection -getport' GET_IO_PORT_CONFIG_API = 'ioportconfig -list -all' GET_RESUME_API = 'getresume -all' GET_LOG_API = 'getlog -date %(begin_time)s %(end_time)s' EMCVNX_VENDOR = 'DELL EMC' RAID_GROUP_ID_PREFIX = 'raid_group_' GET_SG_LIST_HOST_API = 'storagegroup -messner -list -host' GET_PORT_LIST_HBA_API = 'port -list -hba' STATUS_MAP = { 'Ready': constants.StoragePoolStatus.NORMAL, 'Offline': constants.StoragePoolStatus.OFFLINE, 'Valid_luns': constants.StoragePoolStatus.NORMAL, 'Busy': constants.StoragePoolStatus.ABNORMAL, 'Halted': constants.StoragePoolStatus.ABNORMAL, 'Defragmenting': constants.StoragePoolStatus.NORMAL, 'Expanding': constants.StoragePoolStatus.NORMAL, 'Explicit Remove': constants.StoragePoolStatus.OFFLINE, 'Invalid': constants.StoragePoolStatus.OFFLINE, 'Bound': constants.StoragePoolStatus.NORMAL } VOL_TYPE_MAP = {'no': constants.VolumeType.THICK, 'yes': constants.VolumeType.THIN} VOL_COMPRESSED_MAP = {'no': False, 'yes': True} DEFAULT_QUERY_LOG_DAYS = 9 SECS_OF_TEN_DAYS = DEFAULT_QUERY_LOG_DAYS * ONE_DAY_SCE OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0' OID_MESSAGECODE = '1.3.6.1.4.1.1981.1.4.5' OID_DETAILS = '1.3.6.1.4.1.1981.1.4.6' SEVERITY_MAP = {"76": constants.Severity.CRITICAL, "75": constants.Severity.MAJOR, "74": constants.Severity.MINOR, "73": constants.Severity.WARNING, "72": constants.Severity.WARNING, "77": constants.Severity.FATAL, "71": constants.Severity.INFORMATIONAL, "70": constants.Severity.INFORMATIONAL} TRAP_LEVEL_MAP = {'1.3.6.1.4.1.1981.0.6': constants.Severity.CRITICAL, '1.3.6.1.4.1.1981.0.5': 
constants.Severity.MINOR, '1.3.6.1.4.1.1981.0.4': constants.Severity.WARNING, '1.3.6.1.4.1.1981.0.3': constants.Severity.INFORMATIONAL, '1.3.6.1.4.1.1981.0.2': constants.Severity.INFORMATIONAL } DISK_STATUS_MAP = { 'BINDING': constants.DiskStatus.ABNORMAL, 'ENABLED': constants.DiskStatus.NORMAL, 'EMPTY': constants.DiskStatus.ABNORMAL, 'EXPANDING': constants.DiskStatus.ABNORMAL, 'FORMATTING': constants.DiskStatus.ABNORMAL, 'OFF': constants.DiskStatus.ABNORMAL, 'POWERING UP': constants.DiskStatus.ABNORMAL, 'REBUILDING': constants.DiskStatus.ABNORMAL, 'REMOVED': constants.DiskStatus.ABNORMAL, 'UNASSIGNED': constants.DiskStatus.ABNORMAL, 'UNBOUND': constants.DiskStatus.NORMAL, 'UNFORMATTED': constants.DiskStatus.ABNORMAL, 'UNSUPPORTED': constants.DiskStatus.ABNORMAL } DISK_PHYSICAL_TYPE_MAP = { 'SATA': constants.DiskPhysicalType.SATA, 'SAS': constants.DiskPhysicalType.SAS, 'SSD': constants.DiskPhysicalType.SSD, 'NL-SAS': constants.DiskPhysicalType.NL_SAS, 'NL-SSD': constants.DiskPhysicalType.NL_SSD, 'FLASH': constants.DiskPhysicalType.FLASH, 'SAS FLASH VP': constants.DiskPhysicalType.SAS_FLASH_VP, 'FIBRE CHANNEL': constants.DiskPhysicalType.FC, 'ATA': constants.DiskPhysicalType.ATA } SPPORT_KEY = "Information about each SPPORT:" PORT_CONNECTION_STATUS_MAP = { 'UP': constants.PortConnectionStatus.CONNECTED, 'DOWN': constants.PortConnectionStatus.DISCONNECTED } PORT_HEALTH_STATUS_MAP = { 'ONLINE': constants.PortHealthStatus.NORMAL, 'DISABLED': constants.PortHealthStatus.ABNORMAL, 'ENABLED': constants.PortHealthStatus.NORMAL, 'MISSING': constants.PortHealthStatus.ABNORMAL } PORT_TYPE_MAP = { 'FIBRE': constants.PortType.FC, 'FCOE': constants.PortType.FCOE, 'ISCSI': constants.PortType.ISCSI, 'SAS': constants.PortType.SAS, 'UNKNOWN': constants.PortType.OTHER } INITIATOR_TYPE_MAP = { 'FC': constants.InitiatorType.FC, 'FCOE': constants.InitiatorType.FC, 'ISCSI': constants.InitiatorType.ISCSI, 'SAS': constants.InitiatorType.SAS, 'UNKNOWN': constants.InitiatorType.UNKNOWN } ALU_PAIRS_PATTERN = '^[0-9]+\\s+[0-9]+$' HBA_UID_PATTERN = "^\\s*HBA UID\\s+SP Name\\s+SPPort" CONTROLLER_CAP = { ControllerMetric.IOPS.name: { "unit": ControllerMetric.IOPS.unit, "description": ControllerMetric.IOPS.description }, ControllerMetric.READ_IOPS.name: { "unit": ControllerMetric.READ_IOPS.unit, "description": ControllerMetric.READ_IOPS.description }, ControllerMetric.WRITE_IOPS.name: { "unit": ControllerMetric.WRITE_IOPS.unit, "description": ControllerMetric.WRITE_IOPS.description }, ControllerMetric.THROUGHPUT.name: { "unit": ControllerMetric.THROUGHPUT.unit, "description": ControllerMetric.THROUGHPUT.description }, ControllerMetric.READ_THROUGHPUT.name: { "unit": ControllerMetric.READ_THROUGHPUT.unit, "description": ControllerMetric.READ_THROUGHPUT.description }, ControllerMetric.WRITE_THROUGHPUT.name: { "unit": ControllerMetric.WRITE_THROUGHPUT.unit, "description": ControllerMetric.WRITE_THROUGHPUT.description }, ControllerMetric.RESPONSE_TIME.name: { "unit": ControllerMetric.RESPONSE_TIME.unit, "description": ControllerMetric.RESPONSE_TIME.description } } VOLUME_CAP = { VolumeMetric.IOPS.name: { "unit": VolumeMetric.IOPS.unit, "description": VolumeMetric.IOPS.description }, VolumeMetric.READ_IOPS.name: { "unit": VolumeMetric.READ_IOPS.unit, "description": VolumeMetric.READ_IOPS.description }, VolumeMetric.WRITE_IOPS.name: { "unit": VolumeMetric.WRITE_IOPS.unit, "description": VolumeMetric.WRITE_IOPS.description }, VolumeMetric.THROUGHPUT.name: { "unit": VolumeMetric.THROUGHPUT.unit, "description": 
VolumeMetric.THROUGHPUT.description }, VolumeMetric.READ_THROUGHPUT.name: { "unit": VolumeMetric.READ_THROUGHPUT.unit, "description": VolumeMetric.READ_THROUGHPUT.description }, VolumeMetric.WRITE_THROUGHPUT.name: { "unit": VolumeMetric.WRITE_THROUGHPUT.unit, "description": VolumeMetric.WRITE_THROUGHPUT.description }, VolumeMetric.RESPONSE_TIME.name: { "unit": VolumeMetric.RESPONSE_TIME.unit, "description": VolumeMetric.RESPONSE_TIME.description }, VolumeMetric.READ_CACHE_HIT_RATIO.name: { "unit": VolumeMetric.READ_CACHE_HIT_RATIO.unit, "description": VolumeMetric.READ_CACHE_HIT_RATIO.description }, VolumeMetric.WRITE_CACHE_HIT_RATIO.name: { "unit": VolumeMetric.WRITE_CACHE_HIT_RATIO.unit, "description": VolumeMetric.WRITE_CACHE_HIT_RATIO.description }, VolumeMetric.READ_IO_SIZE.name: { "unit": VolumeMetric.READ_IO_SIZE.unit, "description": VolumeMetric.READ_IO_SIZE.description }, VolumeMetric.WRITE_IO_SIZE.name: { "unit": VolumeMetric.WRITE_IO_SIZE.unit, "description": VolumeMetric.WRITE_IO_SIZE.description } } PORT_CAP = { PortMetric.IOPS.name: { "unit": PortMetric.IOPS.unit, "description": PortMetric.IOPS.description }, PortMetric.READ_IOPS.name: { "unit": PortMetric.READ_IOPS.unit, "description": PortMetric.READ_IOPS.description }, PortMetric.WRITE_IOPS.name: { "unit": PortMetric.WRITE_IOPS.unit, "description": PortMetric.WRITE_IOPS.description }, PortMetric.THROUGHPUT.name: { "unit": PortMetric.THROUGHPUT.unit, "description": PortMetric.THROUGHPUT.description }, PortMetric.READ_THROUGHPUT.name: { "unit": PortMetric.READ_THROUGHPUT.unit, "description": PortMetric.READ_THROUGHPUT.description }, PortMetric.WRITE_THROUGHPUT.name: { "unit": PortMetric.WRITE_THROUGHPUT.unit, "description": PortMetric.WRITE_THROUGHPUT.description } } DISK_CAP = { DiskMetric.IOPS.name: { "unit": DiskMetric.IOPS.unit, "description": DiskMetric.IOPS.description }, DiskMetric.READ_IOPS.name: { "unit": DiskMetric.READ_IOPS.unit, "description": DiskMetric.READ_IOPS.description }, DiskMetric.WRITE_IOPS.name: { "unit": DiskMetric.WRITE_IOPS.unit, "description": DiskMetric.WRITE_IOPS.description }, DiskMetric.THROUGHPUT.name: { "unit": DiskMetric.THROUGHPUT.unit, "description": DiskMetric.THROUGHPUT.description }, DiskMetric.READ_THROUGHPUT.name: { "unit": DiskMetric.READ_THROUGHPUT.unit, "description": DiskMetric.READ_THROUGHPUT.description }, DiskMetric.WRITE_THROUGHPUT.name: { "unit": DiskMetric.WRITE_THROUGHPUT.unit, "description": DiskMetric.WRITE_THROUGHPUT.description }, DiskMetric.RESPONSE_TIME.name: { "unit": DiskMetric.RESPONSE_TIME.unit, "description": DiskMetric.RESPONSE_TIME.description } } RESOURCES_TYPE_TO_METRIC_CAP = { constants.ResourceType.CONTROLLER: CONTROLLER_CAP, constants.ResourceType.PORT: PORT_CAP, constants.ResourceType.DISK: DISK_CAP, constants.ResourceType.VOLUME: VOLUME_CAP, } METRIC_MAP = { constants.ResourceType.CONTROLLER: { ControllerMetric.IOPS.name: 16, ControllerMetric.READ_IOPS.name: 25, ControllerMetric.WRITE_IOPS.name: 34, ControllerMetric.THROUGHPUT.name: 13, ControllerMetric.READ_THROUGHPUT.name: 19, ControllerMetric.WRITE_THROUGHPUT.name: 28, ControllerMetric.RESPONSE_TIME.name: 10 }, constants.ResourceType.PORT: { PortMetric.IOPS.name: 16, PortMetric.READ_IOPS.name: 25, PortMetric.WRITE_IOPS.name: 34, PortMetric.THROUGHPUT.name: 13, PortMetric.READ_THROUGHPUT.name: 19, PortMetric.WRITE_THROUGHPUT.name: 28 }, constants.ResourceType.DISK: { DiskMetric.IOPS.name: 16, DiskMetric.READ_IOPS.name: 25, DiskMetric.WRITE_IOPS.name: 34, DiskMetric.THROUGHPUT.name: 13, 
        DiskMetric.READ_THROUGHPUT.name: 19,
        DiskMetric.WRITE_THROUGHPUT.name: 28,
        DiskMetric.RESPONSE_TIME.name: 10
    },
    constants.ResourceType.VOLUME: {
        VolumeMetric.IOPS.name: 16,
        VolumeMetric.READ_IOPS.name: 25,
        VolumeMetric.WRITE_IOPS.name: 34,
        VolumeMetric.THROUGHPUT.name: 13,
        VolumeMetric.READ_THROUGHPUT.name: 19,
        VolumeMetric.WRITE_THROUGHPUT.name: 28,
        VolumeMetric.RESPONSE_TIME.name: 10,
        VolumeMetric.READ_CACHE_HIT_RATIO.name: 42,
        VolumeMetric.WRITE_CACHE_HIT_RATIO.name: 45,
        VolumeMetric.READ_IO_SIZE.name: 22,
        VolumeMetric.WRITE_IO_SIZE.name: 31
    }
}

ARCHIVE_FILE_NAME = '%s_SPA_%s.nar'
GET_SP_TIME = 'getsptime'
GET_NAR_INTERVAL_API = 'analyzer -get -narinterval'
GET_ARCHIVE_API = 'analyzer -archive -list'
CREATE_ARCHIVE_API = 'analyzer -archiveretrieve -file %s -location %s ' \
                     '-overwrite y -retry 3'
DOWNLOAD_ARCHIVE_API = 'analyzer -archive -file %s -path %s -o'
ARCHIVEDUMP_API = 'analyzer -archivedump -data %s%s -out %s%s.csv'
ARCHIVE_FILE_DIR = "/delfin/drivers/utils/performance_file/vnx_block/"
GET_SP_TIME_PATTERN = '%m/%d/%y %H:%M:%S'
ARCHIVE_FILE_NAME_TIME_PATTERN = '%Y_%m_%d_%H_%M_%S'
# Unit: s
SLEEP_TIME_SECONDS = 60
# Unit: ms
CREATE_FILE_TIME_INTERVAL = 150000
# Unit: ms
EXEC_TIME_INTERVAL = 240000
EXEC_MAX_NUM = 50
# Unit: ms
TIME_INTERVAL_FLUCTUATION = 3000
REPLACE_PATH = "/delfin/drivers/dell_emc/vnx/vnx_block"
# Unit: s
CHECK_WAITE_TIME_SECONDS = 15
COLLECTION_TIME_PATTERN = '%m/%d/%Y %H:%M:00'


================================================
FILE: delfin/drivers/dell_emc/vnx/vnx_block/navi_handler.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
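# Illustrative usage sketch (not used by the driver itself): how the handler
# defined below is typically constructed and exercised. The 'cli' dict
# mirrors the kwargs VnxBlockStorDriver passes in; the host, port and
# credentials here are placeholders, and the password is expected in
# delfin's cryptor-encoded form because get_cli_command_str() decodes it
# with cryptor.decode().
def _navi_handler_usage_sketch():
    from delfin import cryptor as _cryptor

    handler = NaviHandler(cli={'host': '192.0.2.10',
                               'port': 443,
                               'username': 'sysadmin',
                               'password': _cryptor.encode('sysadmin_pwd')})
    version = handler.login()    # agent 'revision' on success, else raises
    pools = handler.get_pools()  # list of dicts parsed from navicli output
    return version, pools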
import os import re import threading import time import six from oslo_log import log as logging from delfin import cryptor from oslo_utils import units from delfin import exception from delfin.drivers.dell_emc.vnx.vnx_block import consts from delfin.drivers.dell_emc.vnx.vnx_block.navicli_client import NaviClient from delfin.drivers.utils.tools import Tools LOG = logging.getLogger(__name__) class NaviHandler(object): session_lock = None def __init__(self, **kwargs): cli_access = kwargs.get('cli') if cli_access is None: raise exception.InvalidInput('Input navicli_access is missing') self.navi_host = cli_access.get('host') self.navi_port = cli_access.get('port') self.navi_username = cli_access.get('username') self.navi_password = cli_access.get('password') self.navi_timeout = cli_access.get('conn_timeout', consts.SOCKET_TIMEOUT) self.verify = kwargs.get('verify', False) self.session_lock = threading.Lock() def get_cli_command_str(self, host_ip=None, sub_command=None, timeout=None): if host_ip is None: host_ip = self.navi_host if timeout is None: timeout = self.navi_timeout command_str = consts.NAVISECCLI_API % { 'username': self.navi_username, 'password': cryptor.decode(self.navi_password), 'host': host_ip, 'timeout': timeout} if self.navi_port: command_str = '%s -port %d' % (command_str, self.navi_port) command_str = '%s %s' % (command_str, sub_command) return command_str def login(self, host_ip=None): """Successful login returns the version number Failure to log in will throw an exception """ version = '' if host_ip is None: host_ip = self.navi_host accept_cer = consts.CER_STORE if self.verify: accept_cer = consts.CER_REJECT self.remove_cer(host_ip=host_ip) cer_add_command = '%s %s' % (consts.CER_ADD_API, self.verify) NaviClient.exec(cer_add_command.split()) command_str = \ self.get_cli_command_str(host_ip=host_ip, sub_command=consts.GET_AGENT_API, timeout=consts.LOGIN_SOCKET_TIMEOUT) result = NaviClient.exec(command_str.split(), stdin_value=accept_cer) if result: agent_model = self.cli_res_to_dict(result) if agent_model: version = agent_model.get("revision") return version def remove_cer(self, host_ip=None): if host_ip is None: host_ip = self.navi_host cer_list_str = NaviClient.exec(consts.CER_LIST_API.split()) cer_map = self.analyse_cer(cer_list_str, host_ip) if cer_map.get(host_ip): cer_remove_command = '%s -issuer %s -serialNumber %s' % ( consts.CER_REMOVE_API, cer_map.get(host_ip).get('issuer'), cer_map.get(host_ip).get('serial#')) NaviClient.exec(cer_remove_command.split()) def get_agent(self): return self.get_resources_info(consts.GET_AGENT_API, self.cli_res_to_dict) def get_domain(self): return self.get_resources_info(consts.GET_DOMAIN_API, self.cli_domain_to_dict) def get_pools(self): return self.get_resources_info(consts.GET_STORAGEPOOL_API, self.cli_res_to_list) def get_disks(self): return self.get_resources_info(consts.GET_DISK_API, self.cli_disk_to_list) def get_raid_group(self): return self.get_resources_info(consts.GET_RAIDGROUP_API, self.cli_raid_to_list) def get_pool_lun(self): return self.get_resources_info(consts.GET_LUN_API, self.cli_res_to_list) def get_all_lun(self): return self.get_resources_info(consts.GET_GETALLLUN_API, self.cli_lun_to_list) def get_controllers(self): return self.get_resources_info(consts.GET_SP_API, self.cli_sp_to_list) def get_cpus(self): return self.get_resources_info(consts.GET_RESUME_API, self.cli_cpu_to_dict) def get_ports(self): return self.get_resources_info(consts.GET_PORT_API, self.cli_port_to_list) def get_bus_ports(self): return 
self.get_resources_info(consts.GET_BUS_PORT_API, self.cli_bus_port_to_list) def get_bus_port_state(self): return self.get_resources_info(consts.GET_BUS_PORT_STATE_API, self.cli_bus_port_state_to_dict) def get_iscsi_ports(self): return self.get_resources_info(consts.GET_ISCSI_PORT_API, self.cli_iscsi_port_to_list) def get_io_configs(self): return self.get_resources_info(consts.GET_IO_PORT_CONFIG_API, self.cli_io_config_to_dict) def get_resources_info(self, sub_command, analyse_type): # Execute commands to query data and analyze try: command_str = self.get_cli_command_str(sub_command=sub_command) resource_info = self.navi_exe(command_str.split()) return_value = None if resource_info: return_value = analyse_type(resource_info) except Exception as e: err_msg = "Failed to get resources info from %s: %s" \ % (sub_command, six.text_type(e)) LOG.error(err_msg) raise e return return_value def cli_res_to_dict(self, resource_info): obj_model = {} try: obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if ':' in str_line: str_info = self.split_str_by_colon(str_line) obj_model = self.str_info_to_model(str_info, obj_model) except Exception as e: err_msg = "arrange resource info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_model def cli_res_to_list(self, resource_info): obj_list = [] obj_model = {} try: obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if consts.DISK_ID_KEY in str_line: str_line = str_line.replace(consts.DISK_ID_KEY, "disk id:") if consts.LUN_ID_KEY in str_line: str_line = str_line.replace(consts.LUN_ID_KEY, "lun id:") if ':' not in str_line: continue str_info = self.split_str_by_colon(str_line) obj_model = self.str_info_to_model(str_info, obj_model) else: obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} # If the last object is not added to the LIST, # perform the join operation obj_list = self.add_model_to_list(obj_model, obj_list) except Exception as e: err_msg = "cli resource to list error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def cli_raid_to_list(self, resource_info): obj_list = [] obj_model = {} try: obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() # Use 'RaidGroup ID' to determine whether it is # a new object if str_line and str_line.startswith('RaidGroup ID:'): obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} if str_line: if ':' not in str_line: continue str_info = self.split_str_by_colon(str_line) obj_model = self.str_info_to_model(str_info, obj_model) # If the last object is not added to the LIST, # perform the join operation obj_list = self.add_model_to_list(obj_model, obj_list) except Exception as e: err_msg = "arrange raid info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def cli_sp_to_list(self, resource_info): obj_list = [] obj_model = {} try: obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if ':' not in str_line: obj_model['sp_name'] = str_line else: str_info = self.split_str_by_colon(str_line) obj_model = self.str_info_to_model(str_info, obj_model) if str_line and str_line.startswith( 'SP SCSI ID if Available:'): obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} except Exception as e: err_msg = "arrange sp info error: %s", 
six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def cli_port_to_list(self, resource_info): obj_list = [] obj_model = {} max_speed_str = '' previous_line = '' try: spport_infos = resource_info.split(consts.SPPORT_KEY)[1] obj_infos = spport_infos.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if ':' in str_line: str_info = self.split_str_by_colon(str_line) obj_model = self.str_info_to_model(str_info, obj_model) previous_line = str_line else: if 'Available Speeds:' in previous_line: if 'Auto' not in str_line \ and str_line > max_speed_str: max_speed_str = str_line else: if max_speed_str: obj_model['max_speed'] = max_speed_str obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} max_speed_str = '' previous_line = '' if obj_model: if max_speed_str: obj_model['max_speed'] = max_speed_str obj_list = self.add_model_to_list(obj_model, obj_list) except Exception as e: err_msg = "arrange port info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def cli_bus_port_to_list(self, resource_info): obj_list = [] obj_model = {} sp_list = [] max_speed_str = '' previous_line = '' try: obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if 'Bus ' in str_line and ':' not in str_line: if max_speed_str: obj_model['max_speed'] = max_speed_str obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} sp_list = [] max_speed_str = '' previous_line = '' obj_model['bus_name'] = str_line elif ':' in str_line: previous_line = str_line str_info = self.split_str_by_colon(str_line) obj_model = self.str_info_to_model(str_info, obj_model) if ' Connector State' in str_line: sp_list.append( str_info[0].replace('_connector_state', '')) obj_model['sps'] = sp_list else: if 'Available Speeds:' in previous_line: if 'Auto' not in str_line \ and str_line > max_speed_str: max_speed_str = str_line if max_speed_str: obj_model['max_speed'] = max_speed_str obj_list = self.add_model_to_list(obj_model, obj_list) except Exception as e: err_msg = "arrange port info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def cli_bus_port_state_to_dict(self, resource_info): obj_model = {} try: obj_infos = resource_info.split('\n') sp = '' port_id = '' for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if 'SP ID:' in str_line: str_info = self.split_str_by_colon(str_line) sp = str_info[1] if 'Physical Port ID:' in str_line: str_info = self.split_str_by_colon(str_line) port_id = str_info[1] if 'Port State:' in str_line: str_info = self.split_str_by_colon(str_line) obj_model[sp + '_' + port_id] = str_info[1] except Exception as e: err_msg = "arrange bus port state info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_model def cli_iscsi_port_to_list(self, resource_info): obj_list = [] obj_model = {} try: obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if ':' in str_line: str_info = self.split_str_by_colon(str_line) obj_model = self.str_info_to_model(str_info, obj_model) else: obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} except Exception as e: err_msg = "arrange iscsi port info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def cli_io_config_to_dict(self, resource_info): obj_model = {} try: 
obj_list = [] obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if ':' in str_line: str_info = self.split_str_by_colon(str_line) obj_model = self.str_info_to_model(str_info, obj_model) else: obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} for config in obj_list: if config.get('i/o_module_slot'): key = '%s_%s' % ( config.get('sp_id'), config.get('i/o_module_slot')) obj_model[key] = config.get('i/o_module_type').replace( ' Channel', '') except Exception as e: err_msg = "arrange io port config info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_model def cli_cpu_to_dict(self, resource_info): obj_model = {} try: obj_list = [] obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if 'CPU Module' in str_line: str_line = '%s:True' % str_line str_info = self.split_str_by_colon(str_line) obj_model = self.str_info_to_model(str_info, obj_model) else: obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} for cpu_module in obj_list: if cpu_module.get('cpu_module'): obj_model[ cpu_module.get('emc_serial_number')] = cpu_module.get( 'assembly_name') except Exception as e: err_msg = "arrange cpu info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_model def cli_disk_to_list(self, resource_info): obj_list = [] obj_model = {} try: obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if str_line.startswith('Bus '): disk_name = 'disk_name:%s' % str_line str_info = self.split_str_by_colon(disk_name) obj_model = self.str_info_to_model(str_info, obj_model) str_line = "disk id:%s" % (str_line.replace(' ', '')) if ':' not in str_line: continue str_info = self.split_str_by_colon(str_line) obj_model = self.str_info_to_model(str_info, obj_model) else: obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} # If the last object is not added to the LIST, # perform the join operation obj_list = self.add_model_to_list(obj_model, obj_list) except Exception as e: err_msg = "cli resource to list error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def cli_domain_to_dict(self, resource_info): obj_list = [] obj_model = {} try: obj_infos = resource_info.split('\n') node_value = '' for obj_info in obj_infos: str_line = obj_info.strip() # Use "IP Address" to determine whether it is a new object if str_line and str_line.startswith('IP Address:'): obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} if str_line: if 'Master' in str_line: obj_model['master'] = 'True' str_line = str_line.replace('(Master)', '') str_info = self.split_str_by_colon(str_line) if str_line and str_line.startswith('Node:'): node_value = str_info[1] continue if str_line and str_line.startswith('IP Address:'): obj_model['node'] = node_value obj_model = self.str_info_to_model(str_info, obj_model) # If the last object is not added to the LIST, # perform the join operation obj_list = self.add_model_to_list(obj_model, obj_list) except Exception as e: err_msg = "arrange domain info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def cli_lun_to_list(self, resource_info): obj_list = [] obj_model = {} try: obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line and 
str_line.startswith(consts.LUN_ID_KEY): obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} if str_line: if str_line.startswith(consts.LUN_ID_KEY): str_line = str_line.replace(consts.LUN_ID_KEY, 'LOGICAL UNIT NUMBER:') if str_line.startswith(consts.LUN_NAME_KEY): str_line = str_line.replace(consts.LUN_NAME_KEY, 'Name:') if ':' not in str_line: continue str_info = self.split_str_by_colon(str_line) obj_model = self.str_info_to_model(str_info, obj_model) obj_list = self.add_model_to_list(obj_model, obj_list) except Exception as e: err_msg = "arrange lun info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def analyse_cer(self, resource_info, host_ip=None): cer_map = {} obj_model = {} try: obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line and consts.CER_SEPARATE_KEY not in str_line: str_info = self.split_str_by_colon(str_line) if str_info[0] == 'issuer' and host_ip not in str_info[1]: continue obj_model[str_info[0]] = str_info[1] else: if obj_model and obj_model.get('issuer'): cer_map[host_ip] = obj_model break except Exception as e: err_msg = "arrange cer info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return cer_map def split_str_by_colon(self, str_line): str_info = [] if str_line: # str_info[0] is the parsed attribute name, there are some special # characters such as spaces, brackets, etc., # str_info[1] is the value str_info = str_line.split(':', 1) str_info[0] = str_info[0].strip() str_info[0] = str_info[0].replace(" ", "_") \ .replace("(", "").replace(")", "").lower() if len(str_info) > 1: str_info[1] = str_info[1].strip() return str_info def str_info_to_model(self, str_info, obj_model): # Some information is'attribute: value' # Some attributes: no value for example: # Pool ID: 1 # Description: # State: Offline if str_info: key = None value = None if len(str_info) > 1: key = str_info[0] value = str_info[1] elif len(str_info) == 1: key = str_info[0] obj_model[key] = value return obj_model def add_model_to_list(self, obj_model, obj_list): if len(obj_model) > 0: obj_list.append(obj_model) return obj_list def navi_exe(self, command_str, host_ip=None): self.session_lock.acquire() try: if command_str: accept_cer = consts.CER_STORE if self.verify: accept_cer = consts.CER_REJECT result = NaviClient.exec(command_str, stdin_value=accept_cer) return result except exception.SSLCertificateFailed as e: LOG.error("ssl error: %s", six.text_type(e)) self.login(host_ip) result = NaviClient.exec(command_str) return result except exception.InvalidUsernameOrPassword as e: LOG.error("auth error: %s", six.text_type(e)) self.login(host_ip) result = NaviClient.exec(command_str) return result except Exception as e: err_msg = "naviseccli exec error: %s" % (six.text_type(e)) LOG.error(err_msg) raise e finally: self.session_lock.release() def list_masking_views(self): return self.get_resources_info(consts.GET_SG_LIST_HOST_API, self.cli_sg_to_list) def cli_sg_to_list(self, resource_info): obj_list = [] obj_model = {} try: obj_infos = resource_info.split('\n') pattern = re.compile(consts.ALU_PAIRS_PATTERN) for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if ':' not in str_line: search_obj = pattern.search(str_line) if search_obj: str_info = str_line.split() lun_ids = obj_model.get('lun_ids') if lun_ids: lun_ids.add(str_info[1]) else: lun_ids = set() lun_ids.add(str_info[1]) obj_model['lun_ids'] = lun_ids else: str_info = 
self.split_str_by_colon(str_line) if 'Host name:' in str_line: host_names = obj_model.get('host_names') if host_names: host_names.add(str_info[1]) else: host_names = set() host_names.add(str_info[1]) obj_model['host_names'] = host_names continue obj_model = self.str_info_to_model(str_info, obj_model) if str_line.startswith('Shareable:'): obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} except Exception as e: err_msg = "arrange sg info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def list_hbas(self): return self.get_resources_info(consts.GET_PORT_LIST_HBA_API, self.cli_hba_to_list) def cli_hba_to_list(self, resource_info): obj_list = [] obj_model = {} sp_name = '' port_ids = set() try: obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if 'Information about each HBA:' in obj_info: if obj_model: obj_model['port_ids'] = port_ids obj_list = self.add_model_to_list(obj_model, obj_list) obj_model = {} port_ids = set() sp_name = '' if ':' in obj_info: str_info = self.split_str_by_colon(str_line) obj_model = self.str_info_to_model(str_info, obj_model) if 'SP Name:' in obj_info: sp_name = obj_info.replace('SP Name:', '').replace( 'SP', '').replace('\r', '').replace(' ', '') if 'SP Port ID:' in obj_info: port_id = obj_info.replace('SP Port ID:', '').replace('\r', '').replace( ' ', '') port_id = '%s-%s' % (sp_name, port_id) port_ids.add(port_id) if obj_model: obj_model['port_ids'] = port_ids obj_list.append(obj_model) except Exception as e: err_msg = "arrange host info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def get_archives(self): return self.get_resources_info(consts.GET_ARCHIVE_API, self.cli_archives_to_list) def cli_archives_to_list(self, resource_info): obj_list = [] try: obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: archive_infos = str_line.split() if archive_infos and len(archive_infos) == 5: obj_model = {} obj_model['collection_time'] = \ "%s %s" % (archive_infos[2], archive_infos[3]) obj_model['archive_name'] = archive_infos[4] obj_list.append(obj_model) except Exception as e: err_msg = "arrange archives info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def download_archives(self, archive_name): download_archive_api = consts.DOWNLOAD_ARCHIVE_API % ( archive_name, self.get_local_file_path()) self.get_resources_info(download_archive_api, self.cli_res_to_list) archive_name_infos = archive_name.split('.') archivedump_api = consts.ARCHIVEDUMP_API % ( self.get_local_file_path(), archive_name, self.get_local_file_path(), archive_name_infos[0]) self.get_resources_info(archivedump_api, self.cli_res_to_list) def get_local_file_path(self): driver_path = os.path.abspath(os.path.join(os.getcwd())) driver_path = driver_path.replace("\\", "/") driver_path = driver_path.replace(consts.REPLACE_PATH, "") local_path = '%s%s' % (driver_path, consts.ARCHIVE_FILE_DIR) return local_path def get_sp_time(self): return self.get_resources_info(consts.GET_SP_TIME, self.analysis_sp_time) def analysis_sp_time(self, resource_info): system_time = 0 try: tools = Tools() obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if "Time on SP A:" in str_line: time_str = str_line.replace("Time on SP A:", "").strip() system_time = tools.time_str_to_timestamp( time_str, 
                        consts.GET_SP_TIME_PATTERN)
        except Exception as e:
            err_msg = "analysis sp time error: %s" % six.text_type(e)
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        return system_time

    def get_nar_interval(self):
        return self.get_resources_info(consts.GET_NAR_INTERVAL_API,
                                       self.analysis_nar_interval)

    def analysis_nar_interval(self, resource_info):
        nar_interval = 60
        try:
            if resource_info and ":" in resource_info:
                nar_interval_str = resource_info.split(":")[1].strip()
                nar_interval = int(nar_interval_str)
        except Exception as e:
            err_msg = "analysis nar interval error: %s" % six.text_type(e)
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        return nar_interval

    def get_archive_file_name(self, storage_id):
        tools = Tools()
        # time.time() is in seconds; delfin's Tools helper expects ms
        create_time = tools.timestamp_to_time_str(
            time.time() * units.k, consts.ARCHIVE_FILE_NAME_TIME_PATTERN)
        archive_file_name = consts.ARCHIVE_FILE_NAME % (storage_id,
                                                        create_time)
        return archive_file_name

    def create_archives(self, storage_id):
        archive_name = self.get_archive_file_name(storage_id)
        create_archive_api = consts.CREATE_ARCHIVE_API % (
            archive_name, self.get_local_file_path())
        self.get_resources_info(create_archive_api, self.cli_res_to_list)
        archive_name_infos = archive_name.split('.')
        archivedump_api = consts.ARCHIVEDUMP_API % (
            self.get_local_file_path(), archive_name,
            self.get_local_file_path(), archive_name_infos[0])
        self.get_resources_info(archivedump_api, self.cli_res_to_list)
        return archive_name


================================================
FILE: delfin/drivers/dell_emc/vnx/vnx_block/navicli_client.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
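# Illustrative sketch (placeholders, not part of the client): NaviClient.exec
# below runs an already-split naviseccli argv and maps known error strings in
# the tool's output to delfin exceptions via consts.EXCEPTION_MAP. The
# command here is a hand-written stand-in; the real template the driver uses
# is consts.NAVISECCLI_API, and naviseccli must be installed on the host.
def _navi_client_usage_sketch():
    cmd = ('naviseccli -h 192.0.2.10 -User sysadmin -Password secret '
           '-Scope 0 -t 30 getagent')
    # Feeding consts.CER_STORE on stdin accepts a new SSL certificate,
    # matching what NaviHandler.login() does by default.
    return NaviClient.exec(cmd.split(), stdin_value=consts.CER_STORE)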
from subprocess import Popen, PIPE

import six
from oslo_log import log as logging

from delfin import exception
from delfin.drivers.dell_emc.vnx.vnx_block import consts

LOG = logging.getLogger(__name__)


class NaviClient(object):

    @staticmethod
    def exec(command_str, stdin_value=None):
        """execute command_str using Popen

        :param command_str: should be list type
        :param stdin_value: same as stdin of Popen
        :return: output of Popen.communicate
        """
        try:
            p = Popen(command_str, stdin=PIPE, stdout=PIPE, stderr=PIPE,
                      shell=False)
        except FileNotFoundError as e:
            err_msg = "naviseccli tool not found: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise exception.ComponentNotFound('naviseccli')
        except Exception as e:
            err_msg = "naviseccli exec error: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        if stdin_value:
            out, err = p.communicate(
                input=bytes(stdin_value, encoding='utf-8'))
        else:
            out = p.stdout.read()
        if isinstance(out, bytes):
            out = out.decode("utf-8")
        result = out.strip()
        if result:
            # Determine whether an exception occurs according
            # to the returned information
            for exception_key in consts.EXCEPTION_MAP.keys():
                if stdin_value is None or stdin_value == consts.CER_STORE:
                    if exception_key == consts.CER_ERR:
                        continue
                if exception_key in result:
                    LOG.error('VNX Block exec failed: %s' % result)
                    raise consts.EXCEPTION_MAP.get(exception_key)(result)
        return result


================================================
FILE: delfin/drivers/dell_emc/vnx/vnx_block/vnx_block.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
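# Illustrative sketch (placeholders only, not used by delfin): constructing
# the driver below and reading its capabilities. __init__ logs in
# immediately, so a reachable VNX and an installed naviseccli are required;
# 'storage_id' and the cryptor-encoded password are assumptions that mirror
# how delfin's task manager normally instantiates drivers.
def _vnx_block_driver_usage_sketch():
    driver = VnxBlockStorDriver(
        storage_id='vnx-demo',
        cli={'host': '192.0.2.10', 'port': 443,
             'username': 'sysadmin', 'password': '<cryptor-encoded>'})
    caps = VnxBlockStorDriver.get_capabilities(context=None)
    # 'is_historic': True means collect_perf_metrics() replays archived NAR
    # data between start_time and end_time (see the archive helpers in
    # navi_handler.py) rather than sampling live counters.
    return driver, caps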
from oslo_log import log from delfin.common import constants from delfin.drivers import driver from delfin.drivers.dell_emc.vnx.vnx_block import consts from delfin.drivers.dell_emc.vnx.vnx_block.alert_handler import AlertHandler from delfin.drivers.dell_emc.vnx.vnx_block.component_handler import \ ComponentHandler from delfin.drivers.dell_emc.vnx.vnx_block.navi_handler import NaviHandler LOG = log.getLogger(__name__) class VnxBlockStorDriver(driver.StorageDriver): """VnxBlockStorDriver implement EMC VNX Stor driver""" def __init__(self, **kwargs): super().__init__(**kwargs) self.navi_handler = NaviHandler(**kwargs) self.version = self.navi_handler.login() self.com_handler = ComponentHandler(self.navi_handler) def reset_connection(self, context, **kwargs): self.navi_handler.remove_cer() self.navi_handler.verify = kwargs.get('verify', False) self.navi_handler.login() def close_connection(self): pass def get_storage(self, context): return self.com_handler.get_storage() def list_storage_pools(self, context): return self.com_handler.list_storage_pools(self.storage_id) def list_volumes(self, context): return self.com_handler.list_volumes(self.storage_id) def list_alerts(self, context, query_para=None): raise NotImplementedError( "Driver API list_alerts() is not Implemented") def list_controllers(self, context): return self.com_handler.list_controllers(self.storage_id) def list_ports(self, context): return self.com_handler.list_ports(self.storage_id) def list_disks(self, context): return self.com_handler.list_disks(self.storage_id) def add_trap_config(self, context, trap_config): pass def remove_trap_config(self, context, trap_config): pass @staticmethod def parse_alert(context, alert): return AlertHandler.parse_alert(alert) def clear_alert(self, context, sequence_number): pass @staticmethod def get_access_url(): return 'https://{ip}' def list_storage_host_initiators(self, context): return self.com_handler.list_storage_host_initiators(self.storage_id) def list_storage_hosts(self, context): return self.com_handler.list_storage_hosts(self.storage_id) def list_masking_views(self, context): return self.com_handler.list_masking_views(self.storage_id) def collect_perf_metrics(self, context, storage_id, resource_metrics, start_time, end_time): return self.com_handler.collect_perf_metrics(storage_id, resource_metrics, start_time, end_time) @staticmethod def get_capabilities(context, filters=None): """Get capability of supported driver""" return { 'is_historic': True, 'resource_metrics': { constants.ResourceType.CONTROLLER: consts.CONTROLLER_CAP, constants.ResourceType.VOLUME: consts.VOLUME_CAP, constants.ResourceType.PORT: consts.PORT_CAP, constants.ResourceType.DISK: consts.DISK_CAP } } def get_latest_perf_timestamp(self, context): return self.com_handler.get_latest_perf_timestamp(self.storage_id) ================================================ FILE: delfin/drivers/dell_emc/vplex/__init__.py ================================================ ================================================ FILE: delfin/drivers/dell_emc/vplex/alert_handler.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import hashlib

from oslo_log import log

from delfin import exception, utils
from delfin.common import constants
from delfin.i18n import _

LOG = log.getLogger(__name__)


class AlertHandler(object):
    OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'
    OID_COMPONENT = '1.3.6.1.4.1.1139.21.1.3.0'
    OID_SYMPTOMTEXT = '1.3.6.1.4.1.1139.21.1.5.0'

    TRAP_LEVEL_MAP = {'1.3.6.1.4.1.1139.21.0.1': constants.Severity.CRITICAL,
                      '1.3.6.1.4.1.1139.21.0.2': constants.Severity.MAJOR,
                      '1.3.6.1.4.1.1139.21.0.3': constants.Severity.WARNING,
                      '1.3.6.1.4.1.1139.21.0.4':
                          constants.Severity.INFORMATIONAL
                      }

    SECONDS_TO_MS = 1000

    @staticmethod
    def parse_alert(context, alert):
        try:
            description = alert.get(AlertHandler.OID_SYMPTOMTEXT)
            alert_model = dict()
            alert_model['alert_id'] = alert.get(AlertHandler.OID_COMPONENT)
            alert_model['alert_name'] = description
            alert_model['severity'] = AlertHandler.TRAP_LEVEL_MAP.get(
                alert.get(AlertHandler.OID_SEVERITY),
                constants.Severity.INFORMATIONAL)
            alert_model['category'] = constants.Category.FAULT
            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
            alert_model['occur_time'] = utils.utcnow_ms()
            alert_model['description'] = description
            alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE
            alert_model['location'] = ''
            alert_model['match_key'] = hashlib.md5(
                description.encode()).hexdigest()
            return alert_model
        except Exception as e:
            LOG.error(e)
            msg = (_("Failed to build alert model as some attributes missing "
                     "in alert message."))
            raise exception.InvalidResults(msg)


================================================
FILE: delfin/drivers/dell_emc/vplex/consts.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
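# Illustrative sketch: the tables below normalize raw VPLEX strings into
# delfin constants. Callers in vplex_stor.py read them with dict.get() and a
# conservative default (see analyse_port_connect_status() there); this
# helper only demonstrates that pattern and is not used by the driver.
def _vplex_consts_usage_sketch(protocol='fc', raw_status='no-link'):
    port_type = PORT_TYPE_MAP.get(protocol, constants.PortType.OTHER)
    conn_status = PORT_CONNECT_STATUS_MAP.get(
        raw_status, constants.PortConnectionStatus.UNKNOWN)
    return port_type, conn_status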
from delfin.common import constants SOCKET_TIMEOUT = 10 BASE_CONTEXT = '/vplex' REST_AUTH_URL = '/vplex/clusters' PORT_TYPE_MAP = { 'fc': constants.PortType.FC, 'iscsi': constants.PortType.ISCSI, 'ficon': constants.PortType.FICON, 'fcoe': constants.PortType.FCOE, 'eth': constants.PortType.ETH, 'sas': constants.PortType.SAS, 'ib': constants.PortType.IB, 'other': constants.PortType.OTHER, } INITIATOR_DESCRIPTION = { 'iscsi': constants.InitiatorType.ISCSI, 'fc': constants.InitiatorType.FC, } PORT_LOGICAL_TYPE_MAP = { 'front-end': constants.PortLogicalType.FRONTEND, 'back-end': constants.PortLogicalType.BACKEND, 'service': constants.PortLogicalType.SERVICE, 'management': constants.PortLogicalType.MANAGEMENT, 'internal': constants.PortLogicalType.INTERNAL, 'maintenance': constants.PortLogicalType.MAINTENANCE, 'inter-director-communication': constants.PortLogicalType.INTERCONNECT, 'other': constants.PortLogicalType.OTHER, 'local-com': constants.PortLogicalType.INTERCLUSTER, 'wan-com': constants.PortLogicalType.CLUSTER_MGMT } PORT_CONNECT_STATUS_MAP = { 'up': constants.PortConnectionStatus.CONNECTED, 'down': constants.PortConnectionStatus.DISCONNECTED, 'no-link': constants.PortConnectionStatus.UNKNOWN, 'ok': constants.PortConnectionStatus.CONNECTED, 'pending': constants.PortConnectionStatus.CONNECTED, 'suspended': constants.PortConnectionStatus.DISCONNECTED, 'hardware error': constants.PortConnectionStatus.UNKNOWN } PORT_HEALTH_STATUS_MAP = { 'ok': constants.PortHealthStatus.NORMAL, 'error': constants.PortHealthStatus.ABNORMAL, 'stopped': constants.PortHealthStatus.UNKNOWN } CONTROLLER_STATUS_MAP = { "ok": constants.ControllerStatus.NORMAL, "busy": constants.ControllerStatus.NORMAL, "no contact": constants.ControllerStatus.OFFLINE, "lost communication": constants.ControllerStatus.OFFLINE, "unknown": constants.ControllerStatus.UNKNOWN } HOST_TYPE_MAP = { "hpux": constants.HostOSTypes.HP_UX, "aix": constants.HostOSTypes.AIX, "unknown": constants.HostOSTypes.UNKNOWN } ================================================ FILE: delfin/drivers/dell_emc/vplex/rest_handler.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import six from oslo_log import log as logging from delfin import cryptor from delfin import exception from delfin.drivers.dell_emc.vplex import consts from delfin.drivers.utils.rest_client import RestClient LOG = logging.getLogger(__name__) class RestHandler(RestClient): def __init__(self, **kwargs): super(RestHandler, self).__init__(**kwargs) def login(self): try: data = {} self.init_http_head() self.session.headers.update({ "username": self.rest_username, "password": cryptor.decode(self.rest_password)}) res = self.do_call(consts.REST_AUTH_URL, data, 'GET') if res.status_code != 200: LOG.error("Login error. 
URL: %(url)s\n" "Reason: %(reason)s.", {"url": consts.REST_AUTH_URL, "reason": res.text}) if 'User authentication failed' in res.text: raise exception.InvalidUsernameOrPassword() else: raise exception.StorageBackendException( six.text_type(res.text)) except Exception as e: LOG.error("Login error: %s", six.text_type(e)) raise e def get_rest_info(self, url, data=None, method='GET'): """Return dict result of the url response.""" result_json = None res = self.do_call(url, data, method) if res.status_code == 200: result_json = res.json().get('response') return result_json def get_virtual_volume_by_name_resp(self, cluster_name, virtual_volume_name): url = '%s/clusters/%s/virtual-volumes/%s' % \ (consts.BASE_CONTEXT, cluster_name, virtual_volume_name) response = self.get_rest_info(url) return response def get_virtual_volume_resp(self, cluster_name): url = '%s/clusters/%s/virtual-volumes' % ( consts.BASE_CONTEXT, cluster_name) response = self.get_rest_info(url) return response def get_cluster_resp(self): uri = '%s/clusters' % consts.BASE_CONTEXT response = self.get_rest_info(uri) return response def get_devcie_resp(self, cluster_name): url = '%s/clusters/%s/devices' % (consts.BASE_CONTEXT, cluster_name) response = self.get_rest_info(url) return response def get_device_by_name_resp(self, cluster_name, device_name): url = '%s/clusters/%s/devices/%s' % ( consts.BASE_CONTEXT, cluster_name, device_name) response = self.get_rest_info(url) return response def get_health_check_resp(self): url = '%s/health-check' % consts.BASE_CONTEXT data = {"args": "-l"} response = self.get_rest_info(url, data, method='POST') return response def get_cluster_by_name_resp(self, cluster_name): url = '%s/clusters/%s' % (consts.BASE_CONTEXT, cluster_name) response = self.get_rest_info(url) return response def get_storage_volume_summary_resp(self, cluster_name): url = '%s/storage-volume+summary' % consts.BASE_CONTEXT args = '--clusters %s' % cluster_name data = {"args": args} response = self.get_rest_info(url, data, method='POST') return response def get_device_summary_resp(self, cluster_name): url = '%s/local-device+summary' % consts.BASE_CONTEXT args = '--clusters %s' % cluster_name data = {"args": args} response = self.get_rest_info(url, data, method='POST') return response def get_virtual_volume_summary_resp(self, cluster_name): url = '%s/virtual-volume+summary' % consts.BASE_CONTEXT args = '--clusters %s' % cluster_name data = {"args": args} response = self.get_rest_info(url, data, method='POST') return response def logout(self): try: if self.session: self.session.close() except Exception as e: err_msg = "Logout error: %s" % (six.text_type(e)) LOG.error(err_msg) raise e def get_engine_director_resp(self): url = '%s/engines/*/directors/*' % consts.BASE_CONTEXT response = self.get_rest_info(url) return response def get_version_verbose(self): url = '%s/version' % consts.BASE_CONTEXT args = '-a --verbose' data = {"args": args} response = self.get_rest_info(url, data, method='POST') return response def get_cluster_export_port_resp(self): url = '%s/clusters/*/exports/ports/*' % consts.BASE_CONTEXT response = self.get_rest_info(url) return response def get_engine_director_hardware_port_resp(self): url = '%s/engines/*/directors/*/hardware/ports/*' % consts.BASE_CONTEXT response = self.get_rest_info(url) return response def get_initiators_resp(self): url = '%s/clusters/*/exports/initiator-ports/*' % consts.BASE_CONTEXT response = self.get_rest_info(url) return response def get_storage_views(self): url = 
'%s/clusters/*/exports/storage-views/*' % consts.BASE_CONTEXT response = self.get_rest_info(url) return response ================================================ FILE: delfin/drivers/dell_emc/vplex/vplex_stor.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import six from delfin import exception from oslo_log import log from oslo_utils import units from delfin.common import constants from delfin.drivers import driver from delfin.drivers.dell_emc.vplex import alert_handler from delfin.drivers.dell_emc.vplex import rest_handler from delfin.drivers.dell_emc.vplex import consts LOG = log.getLogger(__name__) class VplexStorageDriver(driver.StorageDriver): """DELL EMC VPLEX storage driver implement the DELL EMC Storage driver""" def __init__(self, **kwargs): super().__init__(**kwargs) self.rest_handler = rest_handler.RestHandler(**kwargs) self.rest_handler.login() def reset_connection(self, context, **kwargs): self.rest_handler.logout() self.rest_handler.verify = kwargs.get('verify', False) self.rest_handler.login() def get_storage(self, context): health_check = self.rest_handler.get_health_check_resp() all_cluster = self.rest_handler.get_cluster_resp() cluster_name_list = VplexStorageDriver.get_resource_names(all_cluster) if cluster_name_list: health_map = {} custom_data = health_check.get("custom-data") VplexStorageDriver.handle_detail(custom_data, health_map, split=':') for cluster_name in cluster_name_list: response = self.rest_handler.get_cluster_by_name_resp( cluster_name) attr_map = VplexStorageDriver.get_attribute_map(response) operate_status = attr_map.get('operational-status') health_status = attr_map.get('health-state') status = VplexStorageDriver.analyse_storage_status( operate_status, health_status) try: raw_capacity = self.get_cluster_raw_capacity(cluster_name) total_capacity = self.get_cluster_total_capacity( cluster_name) used_capacity = self.get_cluster_used_capacity( cluster_name) except Exception: error_msg = "Failed to get capacity from VPLEX!" 
raise exception.StorageBackendException(error_msg) free_capacity = total_capacity - used_capacity if free_capacity < 0: free_capacity = 0 cluster = { 'name': cluster_name, 'vendor': 'DELL EMC', 'description': 'EMC VPlex Storage', 'status': status, 'serial_number': attr_map.get('top-level-assembly'), 'firmware_version': health_map.get("Product Version"), 'model': 'EMC VPLEX ' + health_map.get("Product Type"), 'location': '', 'raw_capacity': int(raw_capacity), 'total_capacity': int(total_capacity), 'used_capacity': int(used_capacity), 'free_capacity': int(free_capacity) } break return cluster def list_storage_pools(self, context): device_list = [] all_cluster = self.rest_handler.get_cluster_resp() cluster_name_list = VplexStorageDriver.get_resource_names(all_cluster) for cluster_name in cluster_name_list: response_device = self.rest_handler.get_devcie_resp(cluster_name) map_device_childer = VplexStorageDriver.get_children_map( response_device) for name, resource_type in map_device_childer.items(): response_dn = self.rest_handler.get_device_by_name_resp( cluster_name, name) map_dn_attribute = VplexStorageDriver.get_attribute_map( response_dn) virtual_volume = map_dn_attribute.get("virtual-volume") total_capacity_str = map_dn_attribute.get("capacity") total_capacity = VplexStorageDriver.analyse_capacity( total_capacity_str) operate_status = map_dn_attribute.get('operational-status') health_status = map_dn_attribute.get('health-state') used_capacity = 0 free_capacity = 0 if virtual_volume: used_capacity = total_capacity else: free_capacity = total_capacity device = { 'name': name, 'storage_id': self.storage_id, 'native_storage_pool_id': map_dn_attribute.get( "system-id"), 'description': 'EMC VPlex Pool', 'status': self.analyse_status(operate_status, health_status), 'storage_type': constants.StorageType.BLOCK, 'total_capacity': int(total_capacity), 'used_capacity': int(used_capacity), 'free_capacity': int(free_capacity) } device_list.append(device) return device_list def list_volumes(self, context): vv_list = [] all_cluster = self.rest_handler.get_cluster_resp() cluster_name_list = VplexStorageDriver.get_resource_names(all_cluster) for cluster_name in cluster_name_list: resposne_vv = self.rest_handler.get_virtual_volume_resp( cluster_name) map_vv_children = VplexStorageDriver.get_children_map(resposne_vv) for name, resource_type in map_vv_children.items(): response_vvn = self.rest_handler. 
\ get_virtual_volume_by_name_resp(cluster_name, name) map_vvn_attribute = VplexStorageDriver.get_attribute_map( response_vvn) thin_enabled = map_vvn_attribute.get("thin-enabled") operate_status = map_vvn_attribute.get('operational-status') health_status = map_vvn_attribute.get('health-state') vv_type = self.analyse_vv_type(thin_enabled) total_capacity = VplexStorageDriver.analyse_capacity( map_vvn_attribute.get("capacity")) vpd_id = map_vvn_attribute.get("vpd-id") cells = vpd_id.split(":") wwn = '' if len(cells) > 1: wwn = cells[1] used_capacity = 0 if vv_type == constants.VolumeType.THICK: used_capacity = total_capacity vv = { 'name': name, 'storage_id': self.storage_id, 'description': 'EMC VPlex volume', 'status': self.analyse_status(operate_status, health_status), 'native_volume_id': vpd_id, 'native_storage_pool_id': map_vvn_attribute.get( 'supporting-device'), 'type': vv_type, 'total_capacity': int(total_capacity), 'used_capacity': int(used_capacity), 'free_capacity': 0, 'wwn': wwn } vv_list.append(vv) return vv_list def add_trap_config(self, context, trap_config): pass def remove_trap_config(self, context, trap_config): pass @staticmethod def parse_alert(context, alert): return alert_handler.AlertHandler().parse_alert(context, alert) def list_alerts(self, context, query_para=None): info_msg = "list_alerts is not supported in model VPLEX" LOG.info(info_msg) raise NotImplementedError(info_msg) def clear_alert(self, context, alert): pass @staticmethod def get_access_url(): return 'https://{ip}' @staticmethod def get_attribute_map(response): attr_map = {} if response: contexts = response.get("context") for context in contexts: attributes = context.get("attributes") for attribute in attributes: key = attribute.get("name") value = attribute.get("value") attr_map[key] = value return attr_map @staticmethod def analyse_capacity(capacity_str): capacity = 0 if capacity_str.strip(): capacity = re.findall("\\d+", capacity_str)[0] return capacity @staticmethod def analyse_status(operational_status, health_status): status = constants.StorageStatus.ABNORMAL status_normal = ["ok"] status_offline = ["unknown", "isolated", "not-running", "non-recoverable-error"] if operational_status and health_status in status_normal: status = constants.StorageStatus.NORMAL elif operational_status and health_status in status_offline: status = constants.StorageStatus.OFFLINE return status @staticmethod def analyse_storage_status(operational_status, health_status): status = constants.StorageStatus.ABNORMAL status_normal = ["ok"] status_offline = ["unknown", "isolated", "not-running", "non-recoverable-error"] if operational_status == constants.StorageStatus.DEGRADED: status = constants.StorageStatus.DEGRADED elif operational_status and health_status in status_normal: status = constants.StorageStatus.NORMAL elif operational_status and health_status in status_offline: status = constants.StorageStatus.OFFLINE return status @staticmethod def analyse_vv_type(thin_enabled): rs_type = constants.VolumeType.THICK if thin_enabled == "enabled": rs_type = constants.VolumeType.THIN return rs_type @staticmethod def get_children_map(response): child_map = {} if response: contexts = response.get("context") for context in contexts: childrens = context.get("children") for children in childrens: name = children.get("name") type = children.get("type") child_map[name] = type return child_map @staticmethod def get_resource_names(response): resource_name_list = [] if response: contexts = response.get('context') for context in contexts: 
                childer_clusters = context.get("children")
                for childer_cluster in childer_clusters:
                    cluster_name = childer_cluster.get("name")
                    resource_name_list.append(cluster_name)
        return resource_name_list

    @staticmethod
    def handle_detail(detail_info, detail_map, split):
        detail_arr = detail_info.split('\n')
        for detail in detail_arr:
            if detail is not None and detail != '':
                strinfo = detail.split(split, 1)
                key = strinfo[0]
                value = ''
                if len(strinfo) > 1:
                    value = strinfo[1]
                detail_map[key] = value

    def get_cluster_raw_capacity(self, cluster_name):
        resposne_summary = self.rest_handler. \
            get_storage_volume_summary_resp(cluster_name)
        try:
            custom_data = resposne_summary.get("custom-data")
            find_capacity = re.findall(
                r"Capacity\s+total\s+(([0-9]*(\.[0-9]{1,3}))|([0-9]+))",
                custom_data)
            find_capacity_str = find_capacity[-1][0]
            find_capacity_float = float(find_capacity_str)
            capacity = int(find_capacity_float * units.Ti)
        except Exception as e:
            LOG.error("Storage raw capacity, cluster %s analyse error %s"
                      % (cluster_name, six.text_type(e)))
            raise e
        return capacity

    def get_cluster_total_capacity(self, cluster_name):
        resposne_summary = self.rest_handler.get_device_summary_resp(
            cluster_name)
        try:
            custom_data = resposne_summary.get("custom-data")
            find_capacity = re.findall(
                r'total.*?(([0-9]*(\.[0-9]{1,3}))|([0-9]+))', custom_data)
            find_capacity_str = find_capacity[-1][0]
            find_capacity_float = float(find_capacity_str)
            capacity = int(find_capacity_float * units.Ti)
        except Exception as e:
            LOG.error("Storage total capacity, cluster %s analyse error %s"
                      % (cluster_name, six.text_type(e)))
            raise e
        return capacity

    def get_cluster_used_capacity(self, cluster_name):
        resposne_summary = self.rest_handler. \
            get_virtual_volume_summary_resp(cluster_name)
        try:
            custom_data = resposne_summary.get("custom-data")
            find_capacity = re.findall(
                r"capacity\s+is\s+(([0-9]*(\.[0-9]{1,3}))|([0-9]+))",
                custom_data)
            find_capacity_str = find_capacity[-1][0]
            find_capacity_float = float(find_capacity_str)
            capacity = int(find_capacity_float * units.Ti)
        except Exception as e:
            LOG.error("Storage used capacity, cluster %s analyse error %s"
                      % (cluster_name, six.text_type(e)))
            raise e
        return capacity

    def list_controllers(self, context):
        """List all storage controllers from storage system."""
        ct_list = []
        director_version_map = {}
        version_resp = self.rest_handler.get_version_verbose()
        all_director = self.rest_handler.get_engine_director_resp()
        ct_context_list = VplexStorageDriver.get_context_list(all_director)
        VplexStorageDriver.analyse_director_version(version_resp,
                                                    director_version_map)
        for ct_context in ct_context_list:
            ct_attr_map = ct_context.get("attributes")
            communication_status = ct_attr_map.get('communication-status')
            name = ct_attr_map.get('name')
            ct = {
                'native_controller_id': ct_attr_map.get('director-id'),
                'name': name,
                'status': VplexStorageDriver.analyse_director_status(
                    communication_status),
                'location': '',
                'storage_id': self.storage_id,
                'soft_version': self.get_value_from_nest_map(
                    director_version_map, name, "Director Software"),
                'cpu_info': '',
                'memory_size': ''
            }
            ct_list.append(ct)
        return ct_list

    def list_ports(self, context):
        """List all ports from storage system."""
        port_list = []
        hardware_port_map = {}
        hardware_port_resp = self.rest_handler. \
            get_engine_director_hardware_port_resp()
        export_port_resp = self.rest_handler.get_cluster_export_port_resp()
        VplexStorageDriver.analyse_hardware_port(hardware_port_resp,
                                                 hardware_port_map)
        port_context_list = VplexStorageDriver.
\ get_context_list(export_port_resp) for port_context in port_context_list: port_attr = port_context.get('attributes') port_name = port_attr.get('name') export_status = port_attr.get('export-status') speed, max_speed, protocols, role, port_status, \ operational_status = self.get_hardware_port_info( hardware_port_map, port_name, 'attributes') connection_status = VplexStorageDriver.analyse_port_connect_status( export_status) port = { 'native_port_id': port_attr.get('name'), 'name': port_attr.get('name'), 'type': VplexStorageDriver.analyse_port_type(protocols), 'logical_type': VplexStorageDriver.analyse_port_logical_type( role), 'connection_status': connection_status, 'health_status': VplexStorageDriver.analyse_port_health_status( operational_status), 'location': '', 'storage_id': self.storage_id, 'native_parent_id': port_attr.get('director-id'), 'speed': VplexStorageDriver.analyse_speed(speed), 'max_speed': VplexStorageDriver.analyse_speed(max_speed), 'wwn': port_attr.get('port-wwn'), 'mac_address': '', 'ipv4': '', 'ipv4_mask': '', 'ipv6': '', 'ipv6_mask': '' } port_list.append(port) return port_list @staticmethod def get_context_list(response): context_list = [] if response: contexts = response.get("context") for context in contexts: ct_type = context.get("type") parent = context.get("parent") attributes = context.get("attributes") context_map = {} attr_map = {} for attribute in attributes: key = attribute.get("name") value = attribute.get("value") attr_map[key] = value context_map["type"] = ct_type context_map["parent"] = parent context_map["attributes"] = attr_map context_list.append(context_map) return context_list @staticmethod def analyse_director_version(version_resp, director_version_map): custom_data = version_resp.get('custom-data') detail_arr = custom_data.split('\n') director_name = '' version_name = '' for detail in detail_arr: if detail is not None and detail != '': if "For director" in detail: match_obj = re.search( r'For director.+?directors/(.*?):', detail) if match_obj: director_name = match_obj.group(1) continue if director_name: if "What:" in detail: match_obj = re.search(r'What:\s+(.+?)$', detail) if match_obj: version_name = match_obj.group(1) continue if version_name: match_obj = re.search(r'Version:\s+(.+?)$', detail) if match_obj: version_value = match_obj.group(1) if director_version_map.get(director_name): director_version_map.get(director_name)[ version_name] = version_value else: version_map = {} version_map[version_name] = version_value director_version_map[ director_name] = version_map @staticmethod def analyse_director_status(status): return consts.CONTROLLER_STATUS_MAP. 
\ get(status, constants.ControllerStatus.UNKNOWN) def get_director_specified_version(self, version_map, director_name, specified_name): version_value = '' if version_map: director_map = version_map.get(director_name) if director_map: version_value = director_map.get(specified_name) return version_value def get_value_from_nest_map(self, nest_map, first_key, second_key): final_value = '' if nest_map: second_map = nest_map.get(first_key) if second_map: final_value = second_map.get(second_key) return final_value def get_hardware_port_info(self, nest_map, first_key, second_key): speed = '' max_speed = '' protocols = [] role = '' port_status = '' operational_status = '' if nest_map: second_map = nest_map.get(first_key) if second_map: third_map = second_map.get(second_key) if third_map: speed = third_map.get('current-speed') max_speed = third_map.get('max-speed') protocols = third_map.get('protocols') role = third_map.get('role') port_status = third_map.get('port-status') operational_status = third_map.get('operational-status') return (speed, max_speed, protocols, role, port_status, operational_status) @staticmethod def analyse_hardware_port(resp, hardware_port_map): port_list = VplexStorageDriver.get_context_list(resp) if port_list: for port in port_list: port_attr = port.get("attributes") if port_attr: port_name = port_attr.get("target-port") hardware_port_map[port_name] = port @staticmethod def analyse_port_type(protocols): port_type = constants.PortType.OTHER if protocols: for protocol in protocols: port_type_value = consts.PORT_TYPE_MAP.get(protocol) if port_type_value: port_type = port_type_value break return port_type @staticmethod def analyse_port_logical_type(role): return consts.PORT_LOGICAL_TYPE_MAP. \ get(role, constants.PortLogicalType.OTHER) @staticmethod def analyse_port_connect_status(status): return consts.PORT_CONNECT_STATUS_MAP. \ get(status, constants.PortConnectionStatus.UNKNOWN) @staticmethod def analyse_port_health_status(status): return consts.PORT_HEALTH_STATUS_MAP. 
\ get(status, constants.PortHealthStatus.UNKNOWN) @staticmethod def analyse_speed(speed_value): speed = None if speed_value: match_obj = re.search(r'([1-9]\d*\.?\d*)|(0\.\d*[1-9])', speed_value) if match_obj: speed = int(match_obj.group(0)) if 'Gbit' in speed_value: speed = speed * units.G elif 'Mbit' in speed_value: speed = speed * units.M elif 'Kbit' in speed_value: speed = speed * units.k return speed def list_masking_views(self, content): try: view_list = [] view_response = self.rest_handler.get_storage_views() storage_view_list = self.get_attributes_from_response( view_response) if storage_view_list: host_list = self.list_storage_hosts(content) host_map = {} for host_value in host_list: host_map[host_value.get('name')] = \ host_value.get('native_storage_host_id') for storage_view in storage_view_list: virtual_volumes = storage_view.get('virtual-volumes') initiators_list = storage_view.get('initiators') view_name = storage_view.get('name') if initiators_list: for initiator_info in initiators_list: native_masking_view_id = initiator_info native_storage_host_id = host_map.get( initiator_info) if virtual_volumes: for virtual_volume in virtual_volumes: volume_value = virtual_volume.split( ',') native_volume_id = volume_value[2] volume_id = native_volume_id.replace( ':', '') view_map = { "name": view_name, "description": view_name, "storage_id": self.storage_id, "native_masking_view_id": native_masking_view_id + volume_id, "native_port_group_id": "port_group_" + initiator_info, "native_volume_id": native_volume_id, "native_storage_host_id": native_storage_host_id } view_list.append(view_map) return view_list except Exception: LOG.error("Failed to get view from vplex") raise def list_storage_host_initiators(self, content): try: initiators_list = [] initiators_response = self.rest_handler.get_initiators_resp() initiators_info_list = self.get_attributes_from_response( initiators_response) for initiators_map in initiators_info_list: initiators_type = initiators_map.get('port_type') initiators_type_arr = initiators_type.split('-') initiators_type_index = initiators_type_arr[0] description = consts.INITIATOR_DESCRIPTION.get( initiators_type_index, constants.InitiatorType.UNKNOWN) initiator_item = { "name": initiators_map.get('name'), "type": description, "storage_id": self.storage_id, "native_storage_host_initiator_id": initiators_map.get('port-wwn'), "wwn": initiators_map.get('port-wwn'), "alias": initiators_map.get('port-wwn'), "status": constants.InitiatorStatus.ONLINE, "native_storage_host_id": initiators_map.get('port-wwn') } initiators_list.append(initiator_item) return initiators_list except Exception: LOG.error("Failed to get host_initiators from vplex") raise def list_storage_hosts(self, content): try: hosts_list = [] host_response = self.rest_handler.get_initiators_resp() hosts_info_list = self.get_attributes_from_response(host_response) for host_info in hosts_info_list: os_type = host_info.get('type') host_dict = { "name": host_info.get('name'), "storage_id": self.storage_id, "os_type": consts.HOST_TYPE_MAP.get( os_type, constants.HostOSTypes.UNKNOWN), "native_storage_host_id": host_info.get('port-wwn'), "status": constants.HostStatus.NORMAL } hosts_list.append(host_dict) return hosts_list except Exception: LOG.error("Failed to get storage_host from vplex") raise def list_port_groups(self, context): try: port_groups_list = [] port_group_relation_list = [] port_group_response = self.rest_handler.get_storage_views() storage_view_list = self.get_attributes_from_response( 
port_group_response) for storage_view in storage_view_list: ports = storage_view.get('ports') initiators_info_list = storage_view.get('initiators') if initiators_info_list: for initiator_info in initiators_info_list: port_group_map = { "name": "port_group_" + initiator_info, "description": "port_group_" + initiator_info, "storage_id": self.storage_id, "native_port_group_id": "port_group_" + initiator_info, "ports": ports } if ports: for port in ports: port_group_relation = { 'storage_id': self.storage_id, 'native_port_group_id': "port_group_" + initiator_info, 'native_port_id': port } port_group_relation_list.append( port_group_relation) port_groups_list.append(port_group_map) port_groups_result = { 'port_groups': port_groups_list, 'port_grp_port_rels': port_group_relation_list } return port_groups_result except Exception: LOG.error("Failed to get port_groups from vplex") raise @staticmethod def get_attributes_from_response(response): attributes_list = [] if response: contexts = response.get("context") for context in contexts: child_map = {} attributes = context.get("attributes") context_type = context.get("type") child_map['port_type'] = context_type for children in attributes: name = children.get("name") value = children.get("value") child_map[name] = value attributes_list.append(child_map) return attributes_list @staticmethod def handle_detail_list(detail_info, detail_map, split): detail_arr = detail_info.split('\n') for detail in detail_arr: if detail is not None and detail != '': strinfo = detail.split(split, 1) key = strinfo[0] value = '' if len(strinfo) > 1: value = strinfo[1] detail_map[key] = value ================================================ FILE: delfin/drivers/driver.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import six import abc @six.add_metaclass(abc.ABCMeta) class StorageDriver(object): def __init__(self, **kwargs): """ :param kwargs: A dictionary that includes access information. Note that it is not safe to keep the username and password in memory, so each driver should use them once to obtain a session instead of storing them directly.
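        A minimal sketch of that suggestion (DummySession and make_session
        are illustrative stand-ins, not delfin APIs):

            class DummySession:
                # Stand-in for a vendor REST/SSH session object.
                def __init__(self, username, password):
                    self.token = hash((username, password))

            def make_session(**kwargs):
                # Consume the credentials once; only the session survives.
                return DummySession(kwargs.pop('username', None),
                                    kwargs.pop('password', None))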
""" self.storage_id = kwargs.get('storage_id', None) def delete_storage(self, context): """Cleanup storage device information from driver""" pass def add_storage(self, kwargs): """Add storage device information to driver""" pass @abc.abstractmethod def reset_connection(self, context, **kwargs): """ Reset connection with backend with new args """ pass @abc.abstractmethod def get_storage(self, context): """Get storage device information from storage system""" pass @abc.abstractmethod def list_storage_pools(self, context): """List all storage pools from storage system.""" pass @abc.abstractmethod def list_volumes(self, context): """List all storage volumes from storage system.""" pass def list_controllers(self, context): """List all storage controllers from storage system.""" raise NotImplementedError( "Driver API list_controllers() is not Implemented") def list_ports(self, context): """List all ports from storage system.""" raise NotImplementedError( "Driver API list_ports() is not Implemented") def list_disks(self, context): """List all disks from storage system.""" raise NotImplementedError( "Driver API list_disks() is not Implemented") @abc.abstractmethod def add_trap_config(self, context, trap_config): """Config the trap receiver in storage system.""" pass @abc.abstractmethod def remove_trap_config(self, context, trap_config): """Remove trap receiver configuration from storage system.""" pass @staticmethod def parse_alert(context, alert): """Parse alert data got from snmp trap server.""" """ Alert Model Description *****Filled from driver side *********************** alert_id Unique identification for a given alert type alert_name Unique name for a given alert type severity Severity of the alert category Category of alert generated type Type of the alert generated sequence_number Sequence number for the alert, uniquely identifies a given alert instance used for clearing the alert occur_time Time at which alert is generated from device in epoch format description Possible cause description or other details about the alert recovery_advice Some suggestion for handling the given alert resource_type Resource type of device/source generating alert location Detailed info about the tracing the alerting device such as slot, rack, component, parts etc ***************************************************** """ pass @abc.abstractmethod def list_alerts(self, context, query_para=None): """List all current alerts from storage system.""" """ query_para is an optional para which contains 'begin_time' and 'end_time' (in milliseconds) which is to be used to filter alerts at driver """ pass @abc.abstractmethod def clear_alert(self, context, sequence_number): """Clear alert from storage system.""" pass def collect_perf_metrics(self, context, storage_id, resource_metrics, start_time, end_time): """Collect performance metrics from storage system.""" """ Input: context: context information storage_id: storage identifier resource_metrics: dictionary represents the collection configuration Example: resource_metrics = {'storagePool': ['readThroughput', 'writeThroughput', 'responseTime'], 'volume': ['readThroughput', 'writeThroughput']} start_time Time from which the performance metric to be collected It is in epoch format in milliseconds end_time Time until which the performance metric to be collected It is in epoch format in milliseconds Response: List of metric with details Format : [[Metric(name=metric_1, labels={'key_1': value_1, 'key_2': value_2,}, values={timestamp_0: value_0, timestamp_n: value_n,})] 
Example: [[Metric(name='responseTime', labels={'storage_id': '1f8d6982-2ac2-4fa9-95ef-78f359de', 'resource_type': 'storagePool'}, values={1616560337249: 96.12081735538251}), Metric(name='throughput', labels={'storage_id': '1f8d6982-2ac2-4fa9-95ef-78f359de', 'resource_type': 'storagePool'}, values={1616560337249: 90.08194398331271})] """ pass def list_quotas(self, context): """List all quotas from storage system.""" raise NotImplementedError( "Driver API list_quotas() is not Implemented") def list_filesystems(self, context): """List all filesystems from storage system.""" raise NotImplementedError( "Driver API list_filesystems() is not Implemented") def list_qtrees(self, context): """List all qtrees from storage system.""" raise NotImplementedError( "Driver API list_qtrees() is not Implemented") def list_shares(self, context): """List all shares from storage system.""" raise NotImplementedError( "Driver API list_shares() is not Implemented") @staticmethod def get_capabilities(context, filters=None): """Get capability of driver: is_historic (bool): required performance_metric_retention_window (int): optional, default is None collect_interval (int): optional, default is TelemetryCollection.DEF_PERFORMANCE_COLLECTION_INTERVAL in common/constants.py failed_job_collect_interval (int): optional, default is TelemetryCollection.FAILED_JOB_SCHEDULE_INTERVAL in common/constants.py resource_metrics (dict): required, please refer to STORAGE_CAPABILITIES_SCHEMA in api/schemas/storage_capabilities_schema.py. For example: { 'is_historic': True, 'performance_metric_retention_window': 4500, 'collect_interval': 900 'failed_job_collect_interval': 900, 'resource_metrics': { 'storage': { 'iops': { 'unit': 'IOPS', 'description': 'Read/write operations per second' }, ... }, ... 
} } """ pass def list_storage_host_initiators(self, context): """List all storage initiators from storage system.""" """ *********Model description********** native_storage_host_initiator_id: Native id at backend side(mandatory) native_storage_host_id: Native id of host at backend side if associated name: Name of the initiator description: Description of the initiator alias: Alias of the initiator type: initiator type (fc, iscsi, nvme_over_roce) status: Health status(normal, offline, abnormal, unknown) wwn: Worldwide name storage_id: Storage id at delfin side """ raise NotImplementedError( "Driver API list_storage_host_initiators() is not Implemented") def list_storage_hosts(self, context): """List all storage hosts from storage system.""" """ *********Model description********** native_storage_host_id: Native id of host at backend side(mandatory) name: Name of the host description: Description of the host os_type: operating system type status: Health status(normal, offline, abnormal, unknown) ip_address: Ip address of the host storage_id: Storage id at delfin side """ raise NotImplementedError( "Driver API list_storage_hosts() is not Implemented") def list_storage_host_groups(self, context): """ Returns a dict with following 'storage_host_groups': , 'storage_host_grp_host_rels': , """ """ ********* storage_host_groups Model description********** native_storage_host_group_id: Native id of host grp at backend side (mandatory) name: Name of the host grp description: Description of the host grp storage_hosts: List of associated hosts if any(, separated list) storage_id: Storage id at delfin side """ raise NotImplementedError( "Driver API list_storage_host_groups() is not Implemented") def list_port_groups(self, context): """ Returns a dict with following 'port_groups': , 'port_grp_port_rels': , """ """ ********* port_groups Model description********** native_port_group_id: Native id of port grp at backend side (mandatory) name: Name of the port grp description: Description of the port grp ports: List of associated ports if any(, separated list) storage_id: Storage id at delfin side """ raise NotImplementedError( "Driver API list_port_groups() is not Implemented") def list_volume_groups(self, context): """ Returns a dict with following 'volume_groups': , 'vol_grp_vol_rels': , """ """ ********* volume_groups Model description********** native_volume_group_id: Native id of volume grp at backend side (mandatory) name: Name of the volume grp description: Description of the volume grp volumes: List of associated volumes if any(, separated list) storage_id: Storage id at delfin side """ raise NotImplementedError( "Driver API list_volume_groups() is not Implemented") def list_masking_views(self, context): """List all masking views from storage system.""" """ *********Model description********** native_masking_view_id: Native id of volume grp at backend side (mandatory) name: Name of the masking view description: Description of the masking view native_storage_host_group_id: Native id of host grp at backend side native_port_group_id: Native id of port grp at backend side native_volume_group_id: Native id of volume grp at backend side native_storage_host_id: Native id of host at backend side native_volume_id: Native id of volume at backend side storage_id: Storage id at delfin side Masking view filling guidelines: Driver can have different backend scenarios such as - Direct host -> direct volume mapping - Direct host -> direct volume -> direct port mapping - Direct host -> volume group mapping - Host 
grp -> volume group mapping - Host grp -> direct volume(s) mapping So driver need to fill in group to item order based on availability as given below From host side: Mandatorily one of the (native_storage_host_group_id | native_storage_host_id) From volume side: Mandatorily one of the (native_volume_group_id | native_volume_id) From port side: Optionally (native_port_group_id) """ raise NotImplementedError( "Driver API list_masking_views() is not Implemented") def get_alert_sources(self, context): return [] def get_latest_perf_timestamp(self, context): """Get the timestamp of the latest performance data of the device""" pass ================================================ FILE: delfin/drivers/fake_storage/__init__.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import random import decorator import math import time import six from eventlet import greenthread from oslo_config import cfg from oslo_log import log from oslo_utils import uuidutils from delfin import exception, db from delfin.common import constants from delfin.common.constants import ResourceType, StorageMetric, \ StoragePoolMetric, VolumeMetric, ControllerMetric, PortMetric, \ DiskMetric, FileSystemMetric from delfin.drivers import driver CONF = cfg.CONF fake_opts = [ cfg.StrOpt('fake_pool_range', default='1-100', help='The range of pool number for one device.'), cfg.StrOpt('fake_volume_range', default='1-2000', help='The range of volume number for one device.'), cfg.StrOpt('fake_api_time_range', default='0.1-0.5', help='The range of time cost for each API.'), cfg.StrOpt('fake_page_query_limit', default='500', help='The limitation of volumes for each query.'), ] CONF.register_opts(fake_opts, "fake_driver") LOG = log.getLogger(__name__) MIN_WAIT, MAX_WAIT = 0.1, 0.5 MIN_POOL, MAX_POOL = 1, 100 MIN_PORTS, MAX_PORTS = 1, 10 MIN_DISK, MAX_DISK = 1, 100 MIN_VOLUME, MAX_VOLUME = 1, 2000 MIN_CONTROLLERS, MAX_CONTROLLERS = 1, 5 PAGE_LIMIT = 500 MIN_STORAGE, MAX_STORAGE = 1, 10 MIN_QUOTA, MAX_QUOTA = 1, 100 MIN_FS, MAX_FS = 1, 10 MIN_QTREE, MAX_QTREE = 1, 100 MIN_SHARE, MAX_SHARE = 1, 100 # Minimum sampling interval MINIMUM_SAMPLE_DURATION_IN_MS = 60 * 1000 # count of instances for each resource type RESOURCE_COUNT_DICT = { "storage": 1, "storagePool": MAX_POOL, "volume": MAX_VOLUME, "port": MAX_PORTS, "controller": MAX_CONTROLLERS, "disk": MAX_DISK, "filesystem": MAX_FS, } # Min and max are currently set to 1 to make sure at least one relation can be # built in fake driver for host mapping elements MIN_STORAGE_HOST_INITIATORS, MAX_STORAGE_HOST_INITIATORS = 1, 3 MIN_STORAGE_HOSTS, MAX_STORAGE_HOSTS = 1, 5 MIN_STORAGE_HOST_GROUPS, MAX_STORAGE_HOST_GROUPS = 1, 5 MIN_VOLUME_GROUPS, MAX_VOLUME_GROUPS = 1, 5 MIN_PORT_GROUPS, MAX_PORT_GROUPS = 1, 5 MAX_GROUP_RESOURCES_SIZE = 5 MIN_MASKING_VIEWS, MAX_MASKING_VIEWS = 1, 5 NON_GROUP_BASED_MASKING, GROUP_BASED_MASKING = 0, 1 def get_range_val(range_str, t): try: rng = range_str.split('-') if len(rng) != 2: raise 
exception.InvalidInput min_val = t(rng[0]) max_val = t(rng[1]) return min_val, max_val except Exception: LOG.error("Invalid range: {0}".format(range_str)) raise exception.InvalidInput def wait_random(low, high): @decorator.decorator def _wait(f, *a, **k): rd = random.randint(0, 100) secs = low + (high - low) * rd / 100 greenthread.sleep(secs) return f(*a, **k) return _wait class FakeStorageDriver(driver.StorageDriver): """FakeStorageDriver shows how to implement the StorageDriver, it also plays a role as faker to fake data for being tested by clients. """ def __init__(self, **kwargs): super().__init__(**kwargs) global MIN_WAIT, MAX_WAIT, MIN_POOL, MAX_POOL, MIN_VOLUME, MAX_VOLUME global PAGE_LIMIT MIN_WAIT, MAX_WAIT = get_range_val( CONF.fake_driver.fake_api_time_range, float) MIN_POOL, MAX_POOL = get_range_val( CONF.fake_driver.fake_pool_range, int) MIN_VOLUME, MAX_VOLUME = get_range_val( CONF.fake_driver.fake_volume_range, int) PAGE_LIMIT = int(CONF.fake_driver.fake_page_query_limit) self.rd_volumes_count = random.randint(MIN_VOLUME, MAX_VOLUME) self.rd_ports_count = random.randint(MIN_PORTS, MAX_PORTS) self.rd_storage_hosts_count = random.randint(MIN_STORAGE_HOSTS, MAX_STORAGE_HOSTS) def _get_random_capacity(self): total = random.randint(1000, 2000) used = int(random.randint(0, 100) * total / 100) free = total - used return total, used, free def reset_connection(self, context, **kwargs): pass @wait_random(MIN_WAIT, MAX_WAIT) def get_storage(self, context): # Do something here sn = six.text_type(uuidutils.generate_uuid()) try: # use existing sn if already registered storage storage = db.storage_get(context, self.storage_id) if storage: sn = storage['serial_number'] except exception.StorageNotFound: LOG.debug('Registering new storage') except Exception: LOG.info('Error while retrieving storage from DB') total, used, free = self._get_random_capacity() raw = random.randint(2000, 3000) subscribed = random.randint(3000, 4000) return { 'name': 'fake_driver', 'description': 'fake driver.', 'vendor': 'fake_vendor', 'model': 'fake_model', 'status': 'normal', 'serial_number': sn, 'firmware_version': '1.0.0', 'location': 'HK', 'total_capacity': total, 'used_capacity': used, 'free_capacity': free, 'raw_capacity': raw, 'subscribed_capacity': subscribed } @wait_random(MIN_WAIT, MAX_WAIT) def list_storage_pools(self, ctx): rd_pools_count = random.randint(MIN_POOL, MAX_POOL) LOG.info("###########fake_pools number for %s: %d" % (self.storage_id, rd_pools_count)) pool_list = [] for idx in range(rd_pools_count): total, used, free = self._get_random_capacity() p = { "name": "storagePool_" + str(idx), "storage_id": self.storage_id, "native_storage_pool_id": "storagePool_" + str(idx), "description": "Fake Pool", "status": "normal", "total_capacity": total, "used_capacity": used, "free_capacity": free, } pool_list.append(p) return pool_list def list_volumes(self, ctx): # Get a random number as the volume count. 
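# The paging below walks rd_volumes_count in PAGE_LIMIT-sized query windows;
# a standalone sketch of the same arithmetic (the numbers are arbitrary):
import math

def page_ranges(total, page_limit):
    # [(start, end), ...] windows covering `total` items, `page_limit` at a time.
    loops = math.ceil(total / page_limit)
    return [(i * page_limit, min((i + 1) * page_limit, total))
            for i in range(loops)]

assert page_ranges(1200, 500) == [(0, 500), (500, 1000), (1000, 1200)]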
rd_volumes_count = self.rd_volumes_count LOG.info("###########fake_volumes number for %s: %d" % ( self.storage_id, rd_volumes_count)) loops = math.ceil(rd_volumes_count / PAGE_LIMIT) volume_list = [] for idx in range(loops): start = idx * PAGE_LIMIT end = (idx + 1) * PAGE_LIMIT if idx == (loops - 1): end = rd_volumes_count vs = self._get_volume_range(start, end) volume_list = volume_list + vs return volume_list def list_controllers(self, ctx): rd_controllers_count = random.randint(MIN_CONTROLLERS, MAX_CONTROLLERS) LOG.info("###########fake_controllers for %s: %d" % (self.storage_id, rd_controllers_count)) ctrl_list = [] for idx in range(rd_controllers_count): total, used, free = self._get_random_capacity() cpu = ["Intel Xenon", "Intel Core ix", "ARM"] sts = list(constants.ControllerStatus.ALL) sts_len = len(constants.ControllerStatus.ALL) - 1 c = { "name": "controller_" + str(idx), "storage_id": self.storage_id, "native_controller_id": "controller_" + str(idx), "location": "loc_" + str(random.randint(0, 99)), "status": sts[random.randint(0, sts_len)], "memory_size": total, "cpu_info": cpu[random.randint(0, 2)], "soft_version": "ver_" + str(random.randint(0, 999)), } ctrl_list.append(c) return ctrl_list def list_ports(self, ctx): rd_ports_count = self.rd_ports_count LOG.info("###########fake_ports for %s: %d" % (self.storage_id, rd_ports_count)) port_list = [] for idx in range(rd_ports_count): max_s, normal, remain = self._get_random_capacity() conn_sts = list(constants.PortConnectionStatus.ALL) conn_sts_len = len(constants.PortConnectionStatus.ALL) - 1 health_sts = list(constants.PortHealthStatus.ALL) health_sts_len = len(constants.PortHealthStatus.ALL) - 1 port_type = list(constants.PortType.ALL) port_type_len = len(constants.PortType.ALL) - 1 logic_type = list(constants.PortLogicalType.ALL) logic_type_len = len(constants.PortLogicalType.ALL) - 1 c = { "name": "port_" + str(idx), "storage_id": self.storage_id, "native_port_id": "port_" + str(idx), "location": "location_" + str(random.randint(0, 99)), "connection_status": conn_sts[ random.randint(0, conn_sts_len)], "health_status": health_sts[ random.randint(0, health_sts_len)], "type": port_type[ random.randint(0, port_type_len)], "logical_type": logic_type[ random.randint(0, logic_type_len)], "speed": normal, "max_speed": max_s, "native_parent_id": "parent_id_" + str(random.randint(0, 99)), "wwn": "wwn_" + str(random.randint(0, 9999)), "mac_address": "mac_" + str(random.randint(0, 9999)), "ipv4": "0.0.0.0", "ipv4_mask": "255.255.255.0", "ipv6": "0", "ipv6_mask": "::", } port_list.append(c) return port_list def list_disks(self, ctx): rd_disks_count = random.randint(MIN_DISK, MAX_DISK) LOG.info("###########fake_disks for %s: %d" % (self.storage_id, rd_disks_count)) disk_list = [] for idx in range(rd_disks_count): max_s, normal, remain = self._get_random_capacity() manufacturer = ["Intel", "Seagate", "WD", "Crucial", "HP"] sts = list(constants.DiskStatus.ALL) sts_len = len(constants.DiskStatus.ALL) - 1 physical_type = list(constants.DiskPhysicalType.ALL) physical_type_len = len(constants.DiskPhysicalType.ALL) - 1 logic_type = list(constants.DiskLogicalType.ALL) logic_type_len = len(constants.DiskLogicalType.ALL) - 1 c = { "name": "disk_" + str(idx), "storage_id": self.storage_id, "native_disk_id": "disk_" + str(idx), "serial_number": "serial_" + str(random.randint(0, 9999)), "manufacturer": manufacturer[random.randint(0, 4)], "model": "model_" + str(random.randint(0, 9999)), "firmware": "firmware_" + str(random.randint(0, 9999)), "speed": 
normal, "capacity": max_s, "status": sts[random.randint(0, sts_len)], "physical_type": physical_type[ random.randint(0, physical_type_len)], "logical_type": logic_type[random.randint(0, logic_type_len)], "health_score": random.randint(0, 100), "native_diskgroup_id": "dg_id_" + str(random.randint(0, 99)), "location": "location_" + str(random.randint(0, 99)), } disk_list.append(c) return disk_list def list_quotas(self, ctx): rd_quotas_count = random.randint(MIN_QUOTA, MAX_QUOTA) LOG.info("###########fake_quotas for %s: %d" % (self.storage_id, rd_quotas_count)) quota_list = [] for idx in range(rd_quotas_count): qtype = list(constants.QuotaType.ALL) qtype_len = len(constants.QuotaType.ALL) - 1 max_cap = random.randint(1111, 9999) fslimit = random.randint(max_cap * 7, max_cap * 8) fhlimit = random.randint(max_cap * 8, max_cap * 9) slimit = random.randint(max_cap * 7000, max_cap * 8000) hlimit = random.randint(max_cap * 8000, max_cap * 9000) user_group = ['usr_', 'grp_'] q = { "native_quota_id": "quota_" + str(idx), "type": qtype[random.randint(0, qtype_len)], "storage_id": self.storage_id, "native_filesystem_id": "quota_" + str(random.randint(0, 99)), "native_qtree_id": "qtree_" + str(random.randint(0, 99)), "capacity_hard_limit": hlimit, "capacity_soft_limit": slimit, "file_hard_limit": fhlimit, "file_soft_limit": fslimit, "file_count": random.randint(0, max_cap * 10), "used_capacity": random.randint(0, max_cap * 10000), "user_group_name": user_group[random.randint(0, 1)] + str(random.randint(0, 99)), } quota_list.append(q) return quota_list def list_filesystems(self, ctx): rd_filesystems_count = random.randint(MIN_FS, MAX_FS) LOG.info("###########fake_filesystems for %s: %d" % (self.storage_id, rd_filesystems_count)) filesystem_list = [] for idx in range(rd_filesystems_count): total, used, free = self._get_random_capacity() boolean = [True, False] sts = list(constants.FilesystemStatus.ALL) sts_len = len(constants.FilesystemStatus.ALL) - 1 worm = list(constants.WORMType.ALL) worm_len = len(constants.WORMType.ALL) - 1 alloc_type = list(constants.VolumeType.ALL) alloc_type_len = len(constants.VolumeType.ALL) - 1 security = list(constants.NASSecurityMode.ALL) security_len = len(constants.NASSecurityMode.ALL) - 1 f = { "name": "filesystem_" + str(idx), "storage_id": self.storage_id, "native_filesystem_id": "filesystem_" + str(idx), "native_pool_id": "storagePool_" + str(idx), "status": sts[random.randint(0, sts_len)], "type": alloc_type[random.randint(0, alloc_type_len)], "security_mode": security[random.randint(0, security_len)], "total_capacity": total, "used_capacity": used, "free_capacity": free, "worm": worm[random.randint(0, worm_len)], "deduplicated": boolean[random.randint(0, 1)], "compressed": boolean[random.randint(0, 1)], } filesystem_list.append(f) return filesystem_list def list_qtrees(self, ctx): rd_qtrees_count = random.randint(MIN_QTREE, MAX_QTREE) LOG.info("###########fake_qtrees for %s: %d" % (self.storage_id, rd_qtrees_count)) qtree_list = [] for idx in range(rd_qtrees_count): security = list(constants.NASSecurityMode.ALL) security_len = len(constants.NASSecurityMode.ALL) - 1 t = { "name": "qtree_" + str(idx), "storage_id": self.storage_id, "native_qtree_id": "qtree_" + str(idx), "native_filesystem_id": "filesystem_" + str(random.randint(0, 99)), "security_mode": security[random.randint(0, security_len)], "path": "/path/qtree_" + str(random.randint(0, 99)), } qtree_list.append(t) return qtree_list def list_shares(self, ctx): rd_shares_count = random.randint(MIN_SHARE, MAX_SHARE) 
LOG.info("###########fake_shares for %s: %d" % (self.storage_id, rd_shares_count)) share_list = [] for idx in range(rd_shares_count): pro = list(constants.ShareProtocol.ALL) pro_len = len(constants.ShareProtocol.ALL) - 1 c = { "name": "share_" + str(idx), "storage_id": self.storage_id, "native_share_id": "share_" + str(idx), "native_filesystem_id": "filesystem_" + str(random.randint(0, 99)), "native_qtree_id": "qtree_" + str(random.randint(0, 99)), "protocol": pro[random.randint(0, pro_len)], "path": "/path/share_" + str(random.randint(0, 99)), } share_list.append(c) return share_list def add_trap_config(self, context, trap_config): pass # Fakedriver do not require to add trap config def remove_trap_config(self, context, trap_config): pass # Fakedriver do not require to remove trap config @staticmethod def parse_alert(context, alert): pass # Fakedriver do not require to parse alert def clear_alert(self, context, alert): pass # Fakedriver do not require to clear alert def list_alerts(self, context, query_para=None): alert_list = [{ "storage_id": self.storage_id, 'alert_id': str(random.randint(1111111, 9999999)), 'sequence_number': 100, 'alert_name': 'SNMP connect failed', 'category': 'Fault', 'severity': 'Major', 'type': 'OperationalViolation', 'location': 'NetworkEntity=entity1', 'description': "SNMP connection to the storage failed.", 'recovery_advice': "Check snmp configurations.", 'occur_time': int(time.time()) }, { "storage_id": self.storage_id, 'alert_id': str(random.randint(1111111, 9999999)), 'sequence_number': 101, 'alert_name': 'Link state down', 'category': 'Fault', 'severity': 'Critical', 'type': 'CommunicationsAlarm', 'location': 'NetworkEntity=entity2', 'description': "Backend link has gone down", 'recovery_advice': "Recheck the network configuration setting.", 'occur_time': int(time.time()) }, { "storage_id": self.storage_id, 'alert_id': str(random.randint(1111111, 9999999)), 'sequence_number': 102, 'alert_name': 'Power failure', 'category': 'Fault', 'severity': 'Fatal', 'type': 'OperationalViolation', 'location': 'NetworkEntity=entity3', 'description': "Power failure occurred. 
", 'recovery_advice': "Investigate power connection.", 'occur_time': int(time.time()) }, { "storage_id": self.storage_id, 'alert_id': str(random.randint(1111111, 9999999)), 'sequence_number': 103, 'alert_name': 'Communication failure', 'category': 'Fault', 'severity': 'Critical', 'type': 'CommunicationsAlarm', 'location': 'NetworkEntity=network1', 'description': "Communication link gone down", 'recovery_advice': "Consult network administrator", 'occur_time': int(time.time()) }] return alert_list @wait_random(MIN_WAIT, MAX_WAIT) def _get_volume_range(self, start, end): volume_list = [] for i in range(start, end): total, used, free = self._get_random_capacity() v = { "name": "volume_" + str(i), "storage_id": self.storage_id, "description": "Fake Volume", "status": "normal", "native_volume_id": "volume_" + str(i), "wwn": "fake_wwn_" + str(i), "total_capacity": total, "used_capacity": used, "free_capacity": free, } volume_list.append(v) return volume_list def _get_random_performance(self, metric_list, start_time, end_time): def get_random_timestamp_value(): rtv = {} timestamp = start_time while timestamp < end_time: rtv[timestamp] = random.uniform(1, 100) timestamp += MINIMUM_SAMPLE_DURATION_IN_MS return rtv # The sample performance_params after filling looks like, # performance_params = {timestamp1: value1, timestamp2: value2} performance_params = {} for key in metric_list.keys(): performance_params[key] = get_random_timestamp_value() return performance_params @wait_random(MIN_WAIT, MAX_WAIT) def get_resource_perf_metrics(self, storage_id, start_time, end_time, resource_type, metric_list): LOG.info("###########collecting metrics for resource %s: from" " storage %s" % (resource_type, self.storage_id)) resource_metrics = [] resource_count = RESOURCE_COUNT_DICT[resource_type] for i in range(resource_count): labels = {'storage_id': storage_id, 'resource_type': resource_type, 'resource_id': resource_type + '_' + str(i), 'type': 'RAW'} fake_metrics = self._get_random_performance(metric_list, start_time, end_time) for key in metric_list.keys(): labels['unit'] = metric_list[key]['unit'] m = constants.metric_struct(name=key, labels=labels, values=fake_metrics[key]) resource_metrics.append(copy.deepcopy(m)) return resource_metrics @wait_random(MIN_WAIT, MAX_WAIT) def collect_perf_metrics(self, context, storage_id, resource_metrics, start_time, end_time): """Collects performance metric for the given interval""" merged_metrics = [] for key in resource_metrics.keys(): m = self.get_resource_perf_metrics(storage_id, start_time, end_time, key, resource_metrics[key]) merged_metrics += m return merged_metrics @staticmethod def get_capabilities(context, filters=None): """Get capability of supported driver""" return { 'is_historic': False, 'performance_metric_retention_window': 4500, 'resource_metrics': { ResourceType.STORAGE: { StorageMetric.THROUGHPUT.name: { "unit": StorageMetric.THROUGHPUT.unit, "description": StorageMetric.THROUGHPUT.description }, StorageMetric.RESPONSE_TIME.name: { "unit": StorageMetric.RESPONSE_TIME.unit, "description": StorageMetric.RESPONSE_TIME.description }, StorageMetric.READ_RESPONSE_TIME.name: { "unit": StorageMetric.READ_RESPONSE_TIME.unit, "description": StorageMetric.READ_RESPONSE_TIME.description }, StorageMetric.WRITE_RESPONSE_TIME.name: { "unit": StorageMetric.WRITE_RESPONSE_TIME.unit, "description": StorageMetric.WRITE_RESPONSE_TIME.description }, StorageMetric.IOPS.name: { "unit": StorageMetric.IOPS.unit, "description": StorageMetric.IOPS.description }, 
StorageMetric.READ_THROUGHPUT.name: { "unit": StorageMetric.READ_THROUGHPUT.unit, "description": StorageMetric.READ_THROUGHPUT.description }, StorageMetric.WRITE_THROUGHPUT.name: { "unit": StorageMetric.WRITE_THROUGHPUT.unit, "description": StorageMetric.WRITE_THROUGHPUT.description }, StorageMetric.READ_IOPS.name: { "unit": StorageMetric.READ_IOPS.unit, "description": StorageMetric.READ_IOPS.description }, StorageMetric.WRITE_IOPS.name: { "unit": StorageMetric.WRITE_IOPS.unit, "description": StorageMetric.WRITE_IOPS.description }, }, ResourceType.STORAGE_POOL: { StoragePoolMetric.THROUGHPUT.name: { "unit": StoragePoolMetric.THROUGHPUT.unit, "description": StoragePoolMetric.THROUGHPUT.description }, StoragePoolMetric.RESPONSE_TIME.name: { "unit": StoragePoolMetric.RESPONSE_TIME.unit, "description": StoragePoolMetric.RESPONSE_TIME.description }, StoragePoolMetric.IOPS.name: { "unit": StoragePoolMetric.IOPS.unit, "description": StoragePoolMetric.IOPS.description }, StoragePoolMetric.READ_THROUGHPUT.name: { "unit": StoragePoolMetric.READ_THROUGHPUT.unit, "description": StoragePoolMetric.READ_THROUGHPUT.description }, StoragePoolMetric.WRITE_THROUGHPUT.name: { "unit": StoragePoolMetric.WRITE_THROUGHPUT.unit, "description": StoragePoolMetric.WRITE_THROUGHPUT.description }, StoragePoolMetric.READ_IOPS.name: { "unit": StoragePoolMetric.READ_IOPS.unit, "description": StoragePoolMetric.READ_IOPS.description }, StoragePoolMetric.WRITE_IOPS.name: { "unit": StoragePoolMetric.WRITE_IOPS.unit, "description": StoragePoolMetric.WRITE_IOPS.description }, }, ResourceType.VOLUME: { VolumeMetric.THROUGHPUT.name: { "unit": VolumeMetric.THROUGHPUT.unit, "description": VolumeMetric.THROUGHPUT.description }, VolumeMetric.RESPONSE_TIME.name: { "unit": VolumeMetric.RESPONSE_TIME.unit, "description": VolumeMetric.RESPONSE_TIME.description }, VolumeMetric.READ_RESPONSE_TIME.name: { "unit": VolumeMetric.READ_RESPONSE_TIME.unit, "description": VolumeMetric.READ_RESPONSE_TIME.description }, VolumeMetric.WRITE_RESPONSE_TIME.name: { "unit": VolumeMetric.WRITE_RESPONSE_TIME.unit, "description": VolumeMetric.WRITE_RESPONSE_TIME.description }, VolumeMetric.IOPS.name: { "unit": VolumeMetric.IOPS.unit, "description": VolumeMetric.IOPS.description }, VolumeMetric.READ_THROUGHPUT.name: { "unit": VolumeMetric.READ_THROUGHPUT.unit, "description": VolumeMetric.READ_THROUGHPUT.description }, VolumeMetric.WRITE_THROUGHPUT.name: { "unit": VolumeMetric.WRITE_THROUGHPUT.unit, "description": VolumeMetric.WRITE_THROUGHPUT.description }, VolumeMetric.READ_IOPS.name: { "unit": VolumeMetric.READ_IOPS.unit, "description": VolumeMetric.READ_IOPS.description }, VolumeMetric.WRITE_IOPS.name: { "unit": VolumeMetric.WRITE_IOPS.unit, "description": VolumeMetric.WRITE_IOPS.description }, VolumeMetric.CACHE_HIT_RATIO.name: { "unit": VolumeMetric.CACHE_HIT_RATIO.unit, "description": VolumeMetric.CACHE_HIT_RATIO.description }, VolumeMetric.READ_CACHE_HIT_RATIO.name: { "unit": VolumeMetric.READ_CACHE_HIT_RATIO.unit, "description": VolumeMetric.READ_CACHE_HIT_RATIO.description }, VolumeMetric.WRITE_CACHE_HIT_RATIO.name: { "unit": VolumeMetric.WRITE_CACHE_HIT_RATIO.unit, "description": VolumeMetric.WRITE_CACHE_HIT_RATIO.description }, VolumeMetric.IO_SIZE.name: { "unit": VolumeMetric.IO_SIZE.unit, "description": VolumeMetric.IO_SIZE.description }, VolumeMetric.READ_IO_SIZE.name: { "unit": VolumeMetric.READ_IO_SIZE.unit, "description": VolumeMetric.READ_IO_SIZE.description }, VolumeMetric.WRITE_IO_SIZE.name: { "unit": VolumeMetric.WRITE_IO_SIZE.unit, 
"description": VolumeMetric.WRITE_IO_SIZE.description }, }, ResourceType.CONTROLLER: { ControllerMetric.THROUGHPUT.name: { "unit": ControllerMetric.THROUGHPUT.unit, "description": ControllerMetric.THROUGHPUT.description }, ControllerMetric.RESPONSE_TIME.name: { "unit": ControllerMetric.RESPONSE_TIME.unit, "description": ControllerMetric.RESPONSE_TIME.description }, ControllerMetric.IOPS.name: { "unit": ControllerMetric.IOPS.unit, "description": ControllerMetric.IOPS.description }, ControllerMetric.READ_THROUGHPUT.name: { "unit": ControllerMetric.READ_THROUGHPUT.unit, "description": ControllerMetric.READ_THROUGHPUT.description }, ControllerMetric.WRITE_THROUGHPUT.name: { "unit": ControllerMetric.WRITE_THROUGHPUT.unit, "description": ControllerMetric.WRITE_THROUGHPUT.description }, ControllerMetric.READ_IOPS.name: { "unit": ControllerMetric.READ_IOPS.unit, "description": ControllerMetric.READ_IOPS.description }, ControllerMetric.WRITE_IOPS.name: { "unit": ControllerMetric.WRITE_IOPS.unit, "description": ControllerMetric.WRITE_IOPS.description }, ControllerMetric.CPU_USAGE.name: { "unit": ControllerMetric.CPU_USAGE.unit, "description": ControllerMetric.CPU_USAGE.description } }, ResourceType.PORT: { PortMetric.THROUGHPUT.name: { "unit": PortMetric.THROUGHPUT.unit, "description": PortMetric.THROUGHPUT.description }, PortMetric.RESPONSE_TIME.name: { "unit": PortMetric.RESPONSE_TIME.unit, "description": PortMetric.RESPONSE_TIME.description }, PortMetric.IOPS.name: { "unit": PortMetric.IOPS.unit, "description": PortMetric.IOPS.description }, PortMetric.READ_THROUGHPUT.name: { "unit": PortMetric.READ_THROUGHPUT.unit, "description": PortMetric.READ_THROUGHPUT.description }, PortMetric.WRITE_THROUGHPUT.name: { "unit": PortMetric.WRITE_THROUGHPUT.unit, "description": PortMetric.WRITE_THROUGHPUT.description }, PortMetric.READ_IOPS.name: { "unit": PortMetric.READ_IOPS.unit, "description": PortMetric.READ_IOPS.description }, PortMetric.WRITE_IOPS.name: { "unit": PortMetric.WRITE_IOPS.unit, "description": PortMetric.WRITE_IOPS.description }, }, ResourceType.DISK: { DiskMetric.THROUGHPUT.name: { "unit": DiskMetric.THROUGHPUT.unit, "description": DiskMetric.THROUGHPUT.description }, DiskMetric.RESPONSE_TIME.name: { "unit": DiskMetric.RESPONSE_TIME.unit, "description": DiskMetric.RESPONSE_TIME.description }, DiskMetric.IOPS.name: { "unit": DiskMetric.IOPS.unit, "description": DiskMetric.IOPS.description }, DiskMetric.READ_THROUGHPUT.name: { "unit": DiskMetric.READ_THROUGHPUT.unit, "description": DiskMetric.READ_THROUGHPUT.description }, DiskMetric.WRITE_THROUGHPUT.name: { "unit": DiskMetric.WRITE_THROUGHPUT.unit, "description": DiskMetric.WRITE_THROUGHPUT.description }, DiskMetric.READ_IOPS.name: { "unit": DiskMetric.READ_IOPS.unit, "description": DiskMetric.READ_IOPS.description }, DiskMetric.WRITE_IOPS.name: { "unit": DiskMetric.WRITE_IOPS.unit, "description": DiskMetric.WRITE_IOPS.description }, }, ResourceType.FILESYSTEM: { FileSystemMetric.THROUGHPUT.name: { "unit": FileSystemMetric.THROUGHPUT.unit, "description": FileSystemMetric.THROUGHPUT.description }, FileSystemMetric.READ_RESPONSE_TIME.name: { "unit": FileSystemMetric.READ_RESPONSE_TIME.unit, "description": FileSystemMetric.READ_RESPONSE_TIME.description }, FileSystemMetric.WRITE_RESPONSE_TIME.name: { "unit": FileSystemMetric.WRITE_RESPONSE_TIME.unit, "description": FileSystemMetric.WRITE_RESPONSE_TIME.description }, FileSystemMetric.IOPS.name: { "unit": FileSystemMetric.IOPS.unit, "description": FileSystemMetric.IOPS.description }, 
FileSystemMetric.READ_THROUGHPUT.name: { "unit": FileSystemMetric.READ_THROUGHPUT.unit, "description": FileSystemMetric.READ_THROUGHPUT.description }, FileSystemMetric.WRITE_THROUGHPUT.name: { "unit": FileSystemMetric.WRITE_THROUGHPUT.unit, "description": FileSystemMetric.WRITE_THROUGHPUT.description }, FileSystemMetric.READ_IOPS.name: { "unit": FileSystemMetric.READ_IOPS.unit, "description": FileSystemMetric.READ_IOPS.description }, FileSystemMetric.WRITE_IOPS.name: { "unit": FileSystemMetric.WRITE_IOPS.unit, "description": FileSystemMetric.WRITE_IOPS.description }, FileSystemMetric.IO_SIZE.name: { "unit": FileSystemMetric.IO_SIZE.unit, "description": FileSystemMetric.IO_SIZE.description }, FileSystemMetric.READ_IO_SIZE.name: { "unit": FileSystemMetric.READ_IO_SIZE.unit, "description": FileSystemMetric.READ_IO_SIZE.description }, FileSystemMetric.WRITE_IO_SIZE.name: { "unit": FileSystemMetric.WRITE_IO_SIZE.unit, "description": FileSystemMetric.WRITE_IO_SIZE.description }, }, } } def list_storage_host_initiators(self, ctx): rd_storage_host_initiators_count = random.randint( MIN_STORAGE_HOST_INITIATORS, MAX_STORAGE_HOST_INITIATORS) LOG.info("###########fake_storage_host_initiators for %s: %d" % (self.storage_id, rd_storage_host_initiators_count)) storage_host_initiators_list = [] for idx in range(rd_storage_host_initiators_count): f = { "name": "storage_host_initiator_" + str(idx), "description": "storage_host_initiator_" + str(idx), "alias": "storage_host_initiator_" + str(idx), "storage_id": self.storage_id, "native_storage_host_initiator_id": "storage_host_initiator_" + str(idx), "wwn": "wwn_" + str(idx), "status": "Normal", "native_storage_host_id": "storage_host_" + str(idx), } storage_host_initiators_list.append(f) return storage_host_initiators_list def list_storage_hosts(self, ctx): rd_storage_hosts_count = self.rd_storage_hosts_count LOG.info("###########fake_storage_hosts for %s: %d" % (self.storage_id, rd_storage_hosts_count)) storage_host_list = [] for idx in range(rd_storage_hosts_count): f = { "name": "storage_host_" + str(idx), "description": "storage_host_" + str(idx), "storage_id": self.storage_id, "native_storage_host_id": "storage_host_" + str(idx), "os_type": "linux", "status": "Normal", "ip_address": "1.2.3." 
+ str(idx) } storage_host_list.append(f) return storage_host_list def list_storage_host_groups(self, ctx): rd_storage_host_groups_count = random.randint( MIN_STORAGE_HOST_GROUPS, MAX_STORAGE_HOST_GROUPS) LOG.info("###########fake_storage_host_groups for %s: %d" % (self.storage_id, rd_storage_host_groups_count)) storage_host_grp_list = [] for idx in range(rd_storage_host_groups_count): # Create hosts in hosts group host_name_list = [] storage_hosts_count = self.rd_storage_hosts_count - 1 if storage_hosts_count > 0: for i in range(MAX_GROUP_RESOURCES_SIZE): host_name = "storage_host_" + str( random.randint(0, storage_hosts_count)) if host_name not in host_name_list: host_name_list.append(host_name) # Create comma separated list storage_hosts = None for host in host_name_list: if storage_hosts: storage_hosts = storage_hosts + "," + host else: storage_hosts = host f = { "name": "storage_host_group_" + str(idx), "description": "storage_host_group_" + str(idx), "storage_id": self.storage_id, "native_storage_host_group_id": "storage_host_group_" + str(idx), "storage_hosts": storage_hosts } storage_host_grp_list.append(f) storage_host_grp_relation_list = [] for storage_host_group in storage_host_grp_list: storage_hosts = storage_host_group.pop('storage_hosts', None) if not storage_hosts: continue storage_hosts = storage_hosts.split(',') for storage_host in storage_hosts: storage_host_group_relation = { 'storage_id': self.storage_id, 'native_storage_host_group_id': storage_host_group['native_storage_host_group_id'], 'native_storage_host_id': storage_host } storage_host_grp_relation_list \ .append(storage_host_group_relation) result = { 'storage_host_groups': storage_host_grp_list, 'storage_host_grp_host_rels': storage_host_grp_relation_list } return result def list_port_groups(self, ctx): rd_port_groups_count = random.randint(MIN_PORT_GROUPS, MAX_PORT_GROUPS) LOG.info("###########fake_port_groups for %s: %d" % (self.storage_id, rd_port_groups_count)) port_grp_list = [] for idx in range(rd_port_groups_count): # Create ports in ports group port_name_list = [] ports_count = self.rd_ports_count - 1 if ports_count > 0: for i in range(MAX_GROUP_RESOURCES_SIZE): port_name = "port_" + str( random.randint(0, ports_count)) if port_name not in port_name_list: port_name_list.append(port_name) # Create comma separated list ports = None for port in port_name_list: if ports: ports = ports + "," + port else: ports = port f = { "name": "port_group_" + str(idx), "description": "port_group_" + str(idx), "storage_id": self.storage_id, "native_port_group_id": "port_group_" + str(idx), "ports": ports } port_grp_list.append(f) port_group_relation_list = [] for port_group in port_grp_list: ports = port_group.pop('ports', None) if not ports: continue ports = ports.split(',') for port in ports: port_group_relation = { 'storage_id': self.storage_id, 'native_port_group_id': port_group['native_port_group_id'], 'native_port_id': port } port_group_relation_list.append(port_group_relation) result = { 'port_groups': port_grp_list, 'port_grp_port_rels': port_group_relation_list } return result def list_volume_groups(self, ctx): rd_volume_groups_count = random.randint(MIN_VOLUME_GROUPS, MAX_VOLUME_GROUPS) LOG.info("###########fake_volume_groups for %s: %d" % (self.storage_id, rd_volume_groups_count)) volume_grp_list = [] for idx in range(rd_volume_groups_count): # Create volumes in volumes group volume_name_list = [] volumes_count = self.rd_volumes_count - 1 if volumes_count > 0: for i in range(MAX_GROUP_RESOURCES_SIZE): volume_name 
= "volume_" + str( random.randint(0, volumes_count)) if volume_name not in volume_name_list: volume_name_list.append(volume_name) # Create comma separated list volumes = None for volume in volume_name_list: if volumes: volumes = volumes + "," + volume else: volumes = volume f = { "name": "volume_group_" + str(idx), "description": "volume_group_" + str(idx), "storage_id": self.storage_id, "native_volume_group_id": "volume_group_" + str(idx), "volumes": volumes } volume_grp_list.append(f) volume_group_relation_list = [] for volume_group in volume_grp_list: volumes = volume_group.pop('volumes', None) if not volumes: continue volumes = volumes.split(',') for volume in volumes: volume_group_relation = { 'storage_id': self.storage_id, 'native_volume_group_id': volume_group['native_volume_group_id'], 'native_volume_id': volume} volume_group_relation_list.append(volume_group_relation) result = { 'volume_groups': volume_grp_list, 'vol_grp_vol_rels': volume_group_relation_list } return result def list_masking_views(self, ctx): rd_masking_views_count = random.randint(MIN_MASKING_VIEWS, MAX_MASKING_VIEWS) LOG.info("##########fake_masking_views for %s: %d" % (self.storage_id, rd_masking_views_count)) masking_view_list = [] for idx in range(rd_masking_views_count): is_group_based = random.randint(NON_GROUP_BASED_MASKING, GROUP_BASED_MASKING) if is_group_based: native_storage_host_group_id = "storage_host_group_" + str(idx) native_volume_group_id = "volume_group_" + str(idx) native_port_group_id = "port_group_" + str(idx) native_storage_host_id = "" native_volume_id = "" else: native_storage_host_group_id = "" native_volume_group_id = "" native_port_group_id = "" native_storage_host_id = "storage_host_" + str(idx) native_volume_id = "volume_" + str(idx) f = { "name": "masking_view_" + str(idx), "description": "masking_view_" + str(idx), "storage_id": self.storage_id, "native_masking_view_id": "masking_view_" + str(idx), "native_storage_host_group_id": native_storage_host_group_id, "native_volume_group_id": native_volume_group_id, "native_port_group_id": native_port_group_id, "native_storage_host_id": native_storage_host_id, "native_volume_id": native_volume_id, } masking_view_list.append(f) return masking_view_list ================================================ FILE: delfin/drivers/fujitsu/__init__.py ================================================ ================================================ FILE: delfin/drivers/fujitsu/eternus/__init__.py ================================================ ================================================ FILE: delfin/drivers/fujitsu/eternus/cli_handler.py ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import hashlib import re import threading import six from oslo_log import log from oslo_utils import units from delfin import exception from delfin.common import constants from delfin.drivers.fujitsu.eternus import consts from delfin.drivers.fujitsu.eternus.consts import DIGITAL_CONSTANT from delfin.drivers.fujitsu.eternus.eternus_ssh_client import \ EternusSSHPool from delfin.drivers.utils.tools import Tools LOG = log.getLogger(__name__) class CliHandler(object): lock = None def __init__(self, **kwargs): self.lock = threading.RLock() self.kwargs = kwargs self.ssh_pool = EternusSSHPool(**kwargs) def login(self): """Test SSH connection """ try: self.exec_command(consts.GET_STORAGE_STATUS) except Exception as e: error = six.text_type(e) LOG.error("Login error: %s", error) raise e def exec_command(self, command, exe_time=consts.DEFAULT_EXE_TIME): try: self.lock.acquire() res = self.ssh_pool.do_exec_shell([ consts.SET_CLIENV_FORCE_UNLOCK, command], exe_time) except Exception as e: LOG.error("Login error: %s", six.text_type(e)) raise e finally: self.lock.release() if res: if 'Error: ' in res: LOG.info(res) return None return res def common_data_encapsulation(self, command): common_data_str = self.exec_command(command) common_data_dict = dict() if common_data_str: common_data_arr = common_data_str.split('\n') for common_data_row in common_data_arr: if '[' in common_data_row and ']' in common_data_row: name_start_index = common_data_row.index('[') name_end_index = common_data_row.index(']') key = common_data_row[:name_start_index].strip() value = common_data_row[name_start_index + 1:name_end_index] common_data_dict[key] = value return common_data_dict def get_controllers(self): controller_data_str = self.exec_command(consts.GET_STORAGE_CONTROLLER) controller_info_list = [] try: if controller_data_str: result_data_arr = controller_data_str.split('\n') controller_info_map = {} for common_data_row in result_data_arr: row_pattern = re.compile(consts.CONTROLLER_NEWLINE_PATTERN) row_search_obj = row_pattern.search(common_data_row) if row_search_obj: name = row_search_obj.group().split(' ')[0] if controller_info_map: controller_info_list.append(controller_info_map) controller_info_map = {} controller_info_map['name'] = name pattern = re.compile(consts.COMMON_VALUE_PATTERN) search_obj = pattern.search(common_data_row) if search_obj: self.analysis_data_to_map(common_data_row, consts.COMMON_VALUE_PATTERN, controller_info_map) if controller_info_map: controller_info_list.append(controller_info_map) except Exception as e: err_msg = "get controller info error: %s", six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return controller_info_list def analysis_data_to_map(self, source_info, pattern_str, obj_map): """Get the contents in brackets through regular expressions. source_info:Source data, example: "Memory Size [4.0GB]" pattern_str: regular expression. 
example:"\\[.*\\]" """ object_info = '' object_infos = re.findall(pattern_str, source_info) if object_infos: object_info = object_infos[0] key = source_info.replace(object_info, '').strip() value = object_info.replace('[', '').replace(']', '') obj_map[key] = value return object_info def get_volumes_type(self, volume_id_dict=None, command=None): if volume_id_dict is None: volume_id_dict = {} try: volumes_type_str = self.exec_command(command) except Exception as e: LOG.error("Get %s info error: %s" % (command, six.text_type(e))) return volume_id_dict block = True if volumes_type_str: volumes_type_arr = volumes_type_str.replace('\r', '').split('\n') for volumes_type_row_str in volumes_type_arr: if not volumes_type_row_str or \ consts.CLI_STR in volumes_type_row_str: continue if consts.SPECIAL_CHARACTERS_TWO in volumes_type_row_str: block = False continue if block: continue volume_type_dict = {} volumes_type_row_arr = volumes_type_row_str.split() volume_id = volumes_type_row_arr[DIGITAL_CONSTANT.ZERO_INT] volume_type = volumes_type_row_arr[ DIGITAL_CONSTANT.MINUS_SIX_INT] volume_type_dict['type'] = volume_type.lower() if \ volume_type else constants.VolumeType.THICK volume_type_dict['used_capacity'] = int( volumes_type_row_arr[ DIGITAL_CONSTANT.MINUS_ONE_INT]) * units.Mi volume_id_dict[volume_id] = volume_type_dict return volume_id_dict def get_alerts(self, command, query_para, list_alert=None): if not list_alert: list_alert = [] events_error_str = self.exec_command(command, consts.ALERT_EXE_TIME) if not events_error_str: return list_alert events_error_dict = self.get_event(events_error_str, query_para) for events_error_dict_values in events_error_dict.values(): alerts_model = dict() description = events_error_dict_values.get('description') alerts_model['alert_id'] = events_error_dict_values.get('code') severity = events_error_dict_values.get('severity') alerts_model['severity'] = consts.SEVERITY_MAP.get( events_error_dict_values.get('severity'), constants.Severity.NOT_SPECIFIED) alerts_model['category'] = constants.Category.FAULT occur_time = events_error_dict_values.get('occur_time') alerts_model['occur_time'] = occur_time alerts_model['description'] = description alerts_model['type'] = constants.EventType.EQUIPMENT_ALARM alerts_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE alerts_model['alert_name'] = description alerts_model['match_key'] = hashlib.md5('{}{}{}'.format( occur_time, severity, description).encode()).hexdigest() list_alert.append(alerts_model) return list_alert @staticmethod def get_event(events_error_str, query_para): events_error_dict = dict() events_error_arr = events_error_str.split('\n') for events_error_row_str in events_error_arr: events_error_row_str = events_error_row_str.strip() reg = re.compile(r"(\d{4}-\d{1,2}-\d{1,2})") if not re.match(reg, events_error_row_str): continue error_description_dict = dict() time_stamp = Tools().time_str_to_timestamp( events_error_row_str[:consts.OCCUR_TIME_RANGE].strip(), consts.TIME_PATTERN) if query_para is not None: try: if time_stamp is None or time_stamp \ < int(query_para.get('begin_time')) or \ time_stamp > int(query_para.get('end_time')): continue except Exception as e: LOG.error(e) severity = events_error_row_str[consts.SEVERITY_RANGE_BEGIN: consts.SEVERITY_RANGE_END].strip() code = events_error_row_str[consts.CODE_RANGE_BEGIN: consts.CODE_RANGE_END].strip() description = events_error_row_str[consts.DESCRIPTION_RANGE:] \ .strip() key = '{}{}{}'.format(severity, code, description) if events_error_dict.get(key): 
continue error_description_dict['severity'] = severity error_description_dict['code'] = code error_description_dict['description'] = description error_description_dict['occur_time'] = time_stamp events_error_dict[key] = error_description_dict return events_error_dict def format_data(self, command, storage_id, method, is_port=False): data_info = self.exec_command(command) data_list = [] if not data_info: return data_list data_array = data_info.split('\n') data_map = {} for data in data_array: if data and data not in '\r': temp_data = data.split(' ') temp_data = list( filter(lambda s: s and s.strip(), temp_data)) if len(temp_data) >= consts.DATA_VALUE_INDEX: data_length = consts.DATA_VALUE_INDEX if is_port: data_length = len(temp_data) for i in range(consts.DATA_KEY_INDEX, data_length): key = temp_data[0].strip() value = temp_data[i].replace('[', '').replace(']', '') value = value.strip() if data_map.get(i): data_map[i][key] = value else: data_map[i] = { key: value } else: data_list.extend(method(data_map, storage_id)) data_map = {} if data_map: data_list.extend(method(data_map, storage_id)) return data_list @staticmethod def format_fc_ports(port_map, storage_id): port_list = [] for key in port_map: speed = None if port_map[key].get('Transfer Rate') and ( 'Gbit/s' in port_map[key].get('Transfer Rate')): speed = port_map[key].get('Transfer Rate').replace('Gbit/s', '') speed = int(speed) * units.G name = port_map[key].get('Port') port_model = { 'name': name, 'storage_id': storage_id, 'native_port_id': port_map[key].get('Port'), 'location': port_map[key].get('Port'), 'type': constants.PortType.FC, 'speed': speed, } port_list.append(port_model) return port_list @staticmethod def format_disks(disk_map, storage_id): disk_list = [] for key in disk_map: speed = None if 'rpm' in disk_map[key].get('Speed'): speed = int(disk_map[key].get('Speed').replace('rpm', '')) size = Tools.get_capacity_size(disk_map[key].get('Size')) physical_type = constants.DiskPhysicalType.UNKNOWN if 'SSD' in disk_map[key].get('Type'): physical_type = consts.DiskPhysicalTypeMap.get('SSD') elif 'Nearline' in disk_map[key].get('Type'): physical_type = consts.DiskPhysicalTypeMap.get('Nearline') elif 'Online' in disk_map[key].get('Type'): physical_type = consts.DiskPhysicalTypeMap.get('Online') elif 'SAS' in disk_map[key].get('Type'): physical_type = consts.DiskPhysicalTypeMap.get('SAS') logical_type = \ consts.DiskLogicalTypeMap.get( disk_map[key].get('Usage'), constants.DiskLogicalType.UNKNOWN ) status = None if disk_map[key].get('Status').split('('): status = disk_map[key].get('Status').split('(')[0] status = \ consts.DISK_STATUS_MAP.get( status.strip(), constants.DiskStatus.OFFLINE) disk_model = { 'name': disk_map[key].get('Location'), 'storage_id': storage_id, 'native_disk_id': disk_map[key].get('Location'), 'serial_number': disk_map[key].get('Serial Number'), 'manufacturer': disk_map[key].get('Vendor ID'), 'model': disk_map[key].get('Type'), 'firmware': disk_map[key].get('Firmware Revision'), 'location': disk_map[key].get('Location'), 'speed': speed, 'capacity': size, 'status': status, 'physical_type': physical_type, 'logical_type': logical_type } disk_list.append(disk_model) return disk_list def get_volumes_or_pool(self, command, str_pattern): data_str = self.exec_command(command) pool_info_list = [] try: if data_str: result_data_arr = data_str.replace('\r', '').split('\n') titles = [] for common_data_row in result_data_arr: title_pattern = re.compile(str_pattern) title_search_obj = title_pattern.search(common_data_row) if 
title_search_obj:
                        titles = common_data_row.split(",")
                    else:
                        if common_data_row:
                            values = common_data_row.split(",")
                            if values and len(values) == len(titles):
                                obj_model = {}
                                for num in range(len(values)):
                                    key = titles[num].lower() \
                                        .replace(' ', '') \
                                        .replace('[', '') \
                                        .replace(']', '')
                                    obj_model[key] = values[num]
                                if obj_model:
                                    pool_info_list.append(obj_model)
        except Exception as e:
            err_msg = "execution {}: error: {}".format(command,
                                                       six.text_type(e))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        return pool_info_list

    def get_ports_status(self):
        port_data_str = self.exec_command(consts.GET_STORAGE_CONTROLLER)
        port_info_dict = {}
        try:
            if port_data_str:
                result_data_arr = port_data_str.split('\n')
                port_info_map = {}
                name = None
                for common_data_row in result_data_arr:
                    row_pattern = re.compile(consts.PORT_NEWLINE_PATTERN)
                    row_search_obj = row_pattern.search(common_data_row)
                    if row_search_obj:
                        name = row_search_obj.group().replace(
                            ' Information', '')
                        port_info_map['name'] = name
                        continue
                    elif port_info_map:
                        pattern = re.compile(consts.COMMON_VALUE_PATTERN)
                        search_obj = pattern.search(common_data_row)
                        if search_obj:
                            self.analysis_data_to_map(
                                common_data_row,
                                consts.COMMON_VALUE_PATTERN,
                                port_info_map)
                        if 'WWN' in common_data_row:
                            port_info_dict[name] = port_info_map
                            port_info_map = {}
        except Exception as e:
            err_msg = "get fc port info error: %s" % six.text_type(e)
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        return port_info_dict


================================================
FILE: delfin/drivers/fujitsu/eternus/consts.py
================================================
# Copyright 2020 The SODA Authors.
# Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
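# NOTE (illustrative sketch, not part of the original file): the ETERNUS
# CLI prints most attributes as "Key [Value]" rows, which CliHandler
# (cli_handler.py above) extracts with the COMMON_VALUE_PATTERN defined
# below. A minimal, self-contained version of that parsing step, with a
# hypothetical helper name:
#
#     import re
#
#     def parse_bracket_row(row):
#         # "Memory Size [4.0GB]" -> ("Memory Size", "4.0GB")
#         match = re.search(r'\[.*\]', row)
#         if not match:
#             return None
#         key = row.replace(match.group(), '').strip()
#         value = match.group().strip('[]')
#         return key, value
#
#     assert parse_bracket_row('Memory Size [4.0GB]') == \
#         ('Memory Size', '4.0GB')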
from delfin.common import constants # get_storage function part GET_STORAGE_NAME = 'show storage-system-name' GET_STORAGE_VENDOR = 'FUJITSU' GET_ENCLOSURE_STATUS = 'show enclosure-status' GET_STORAGE_STATUS = 'show status' GET_STORAGE_SERIAL_NUMBER = 'show boxid' GET_STORAGE_FIRMWARE_VERSION = 'show firmware-version' GET_STORAGE_TOTAL_CAPACITY = 'show storage-cluster-license' GET_STORAGE_CONTROLLER = 'show fru-ce' GET_STORAGE_CONTROLLER_STATUS = 'show enclosure-status -type all' SET_CLIENV_FORCE_UNLOCK = 'set clienv-force-unlock' FIRMWARE_VERSION_CURRENT_COUNT = 3 FIRMWARE_VERSION_LENGTH = 4 CURRENT = 'Current' FIRMWARE_VERSION_NUMBER = 1 # list_volume function part GET_LIST_VOLUMES = 'show volumes' GET_LIST_VOLUMES_MODE_UID = 'show volumes -mode uid' GET_LIST_VOLUMES_TYPE_TPV = 'show volumes -type tpv' GET_LIST_VOLUMES_TYPE_FTV = 'show volumes -type ftv' CLI_STR = 'CLI>' SPECIAL_CHARACTERS_ONE = '^' SPECIAL_CHARACTERS_TWO = '--' VOLUME_TYPE_OPEN = 'open' VOLUME_ID_COUNT = 0 VOLUME_NAME_COUNT = 1 VOLUME_STATUS_COUNT = 2 VOLUME_TYPE_COUNT = 3 NATIVE_STORAGE_POOL_ID_COUNT = 5 TOTAL_CAPACITY_COUNT = 7 DEFAULT_USED_CAPACITY = 0 DEFAULT_FREE_CAPACITY = 0 VOLUMES_CYCLE = 5 VOLUMES_LENGTH = 6 # get_volumes_model function part GET_VOLUMES_MODEL_VOLUME_ID_COUNT = 0 GET_VOLUMES_MODEL_VOLUME_NAME_COUNT = 1 GET_VOLUMES_MODEL_VOLUME_STATUS_COUNT = 2 GET_VOLUMES_MODEL_POOL_ID_COUNT = 4 GET_VOLUMES_MODEL_TOTAL_CAPACITY_COUNT = 8 GET_VOLUMES_MODEL_WWN_COUNT = 9 # list_storage_pools function part GET_STORAGE_POOL_CSV = 'show raid-groups -csv' GET_STORAGE_POOL = 'show raid-groups' POOL_ID_COUNT = 0 POOL_NAME_COUNT = 1 POOL_STATUS_COUNT = 4 POOL_TOTAL_CAPACITY_COUNT = 5 POOL_FREE_CAPACITY_COUNT = 6 POOL_CYCLE = 5 POOL_LENGTH = 6 GET_DISK_COMMAND = 'show disks -disk all' # port GET_PORT_FC_PARAMETERS = 'show fc-parameters' GET_PORT_FCOE_PARAMETERS = 'show fcoe-parameters' PORT_NEWLINE_PATTERN = 'CM#\\d.*Port#\\d Information' DATA_KEY_INDEX = 1 DATA_VALUE_INDEX = 2 CONTROLLER_NEWLINE_PATTERN = 'CM#\\d Information' COMMON_VALUE_PATTERN = '\\[.*\\]' SIZE_PATTERN = "\\d+(?:\\.\\d+)?" 
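# Examples for the two patterns above (illustrative): COMMON_VALUE_PATTERN
# matches the bracketed value in a CLI row, e.g. '[4.0GB]' inside
# 'Memory Size [4.0GB]'; SIZE_PATTERN pulls the numeric part out of a
# capacity string, e.g. '4.0' from '4.0GB'.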
POOL_TITLE_PATTERN = "^\\[RAID Group No\\.\\],\\[RAID Group Name" VOLUME_TITLE_PATTERN = "^\\[Volume No\\.\\],\\[Volume Name]" CONTROLLER_STATUS_PATTERN = 'Controller Module Status/Status Code' CONTROLLER_STATUS_NORMAL_KEY = 'Normal' # list_disk function part SPECIFIC_CHARACTER_ONE = '[' SPECIFIC_CHARACTER_TWO = ']' # list_alert function SHOW_EVENTS_SEVERITY_WARNING = 'show events -severity warning' SHOW_EVENTS_SEVERITY_ERROR = 'show events -severity error' SHOW_EVENTS_LEVEL_WARNING = 'show events -level warning' SHOW_EVENTS_LEVEL_ERROR = 'show events -level error' OCCUR_TIME_RANGE = 19 SEVERITY_RANGE_BEGIN = 22 SEVERITY_RANGE_END = 34 CODE_RANGE_BEGIN = 38 CODE_RANGE_END = 46 DESCRIPTION_RANGE = 48 TIME_PATTERN = '%Y-%m-%d %H:%M:%S' ALERT_EXE_TIME = 5 DEFAULT_EXE_TIME = 0.5 class DIGITAL_CONSTANT(object): ZERO_INT = 0 ONE_INT = 1 MINUS_ONE_INT = -1 TWO_INT = 2 THREE_INT = 3 FIVE_INT = 5 SIX_INT = 6 MINUS_SIX_INT = -6 SEVEN_INT = 7 THOUSAND_INT = 1000 STORAGE_STATUS_MAP = {'normal': constants.StorageStatus.NORMAL, 'offline': constants.StorageStatus.OFFLINE, 'abnormal': constants.StorageStatus.ABNORMAL, 'degraded': constants.StorageStatus.DEGRADED, 'Empty': constants.StorageStatus.OFFLINE, 'Normal': constants.StorageStatus.NORMAL, 'Pinned Data': constants.StorageStatus.OFFLINE, 'Unused': constants.StorageStatus.OFFLINE, 'Warning': constants.StorageStatus.OFFLINE, 'Maintenance': constants.StorageStatus.ABNORMAL, 'Error': constants.StorageStatus.ABNORMAL, 'Loop Down': constants.StorageStatus.OFFLINE, 'Not Ready': constants.StorageStatus.ABNORMAL, 'Subsystem Down': constants.StorageStatus.ABNORMAL, 'Change Assigned CM': constants.StorageStatus.ABNORMAL} STORAGE_POOL_STATUS_MAP = {'Available': constants.StoragePoolStatus.NORMAL, 'Spare in Use': constants.StoragePoolStatus.NORMAL, 'Readying': constants.StoragePoolStatus.NORMAL, 'Rebuild': constants.StoragePoolStatus.NORMAL, 'Copyback': constants.StoragePoolStatus.NORMAL, 'Redundant Copy': constants.StoragePoolStatus.NORMAL, 'Partially Exposed Rebuild': constants.StoragePoolStatus.ABNORMAL, 'Exposed Rebuild': constants.StoragePoolStatus.ABNORMAL, 'Exposed': constants.StoragePoolStatus.ABNORMAL, 'Partially Exposed': constants.StoragePoolStatus.ABNORMAL, 'No Disk Path': constants.StoragePoolStatus.ABNORMAL, 'SED Locked': constants.StoragePoolStatus.ABNORMAL, 'Broken': constants.StoragePoolStatus.ABNORMAL, 'Unknown': constants.StoragePoolStatus.UNKNOWN} LIST_VOLUMES_STATUS_MAP = { 'normal': constants.StorageStatus.NORMAL, 'offline': constants.StorageStatus.OFFLINE, 'abnormal': constants.StorageStatus.ABNORMAL, 'degraded': constants.StorageStatus.DEGRADED, 'Available': constants.StorageStatus.NORMAL, 'Spare in Use': constants.StorageStatus.ABNORMAL, 'Readying': constants.StorageStatus.ABNORMAL, 'Rebuild': constants.StorageStatus.ABNORMAL, 'Copyback': constants.StorageStatus.ABNORMAL, 'Redundant Copy': constants.StorageStatus.ABNORMAL, 'Partially Exposed Rebuild': constants.StorageStatus.ABNORMAL, 'Exposed': constants.StorageStatus.ABNORMAL, 'Partially Exposed': constants.StorageStatus.ABNORMAL, 'Not Ready': constants.StorageStatus.ABNORMAL, 'Broken': constants.StorageStatus.ABNORMAL, 'Data Lost': constants.StorageStatus.ABNORMAL, 'Not Available': constants.StorageStatus.OFFLINE, 'Unknown': constants.StorageStatus.UNKNOWN, } SEVERITY_MAP = { 'Warning': constants.Severity.WARNING, 'warning': constants.Severity.WARNING, 'Error': constants.Severity.FATAL, 'error': constants.Severity.FATAL } DiskPhysicalTypeMap = { 'Nearline': 
constants.DiskPhysicalType.UNKNOWN, 'Online': constants.DiskPhysicalType.UNKNOWN, 'SSD': constants.DiskPhysicalType.SSD, 'SAS': constants.DiskPhysicalType.SAS, 'unknown': constants.DiskPhysicalType.UNKNOWN } DiskLogicalTypeMap = { 'Data': constants.DiskLogicalType.MEMBER, 'Spare': constants.DiskLogicalType.SPARE, 'unknown': constants.DiskLogicalType.UNKNOWN, } DISK_STATUS_MAP = { 'Available': constants.DiskStatus.NORMAL, 'Spare': constants.DiskStatus.NORMAL, 'Present': constants.DiskStatus.NORMAL, 'Readying': constants.DiskStatus.NORMAL, 'Rebuild/Copyback': constants.DiskStatus.NORMAL, 'Copyback': constants.DiskStatus.NORMAL, 'Rebuild': constants.DiskStatus.NORMAL, 'Redundant': constants.DiskStatus.NORMAL, 'Not Supported': constants.DiskStatus.ABNORMAL, 'Not Exist': constants.DiskStatus.ABNORMAL, 'Failed Usable': constants.DiskStatus.ABNORMAL, 'Broken': constants.DiskStatus.NORMAL, 'Not Available': constants.DiskStatus.ABNORMAL, 'Formatting': constants.DiskStatus.NORMAL, 'Not Format': constants.DiskStatus.NORMAL } PARSE_ALERT_ALERT_ID = '1.3.6.1.2.1.1.3.0' PARSE_ALERT_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0' PARSE_ALERT_COMPONENT = '1.3.6.1.4.1.211.1.21.1.150.7.0' PARSE_ALERT_LOCATION = '1.3.6.1.4.1.211.1.21.1.150.1.1.0' PARSE_ALERT_DESCRIPTION = '1.3.6.1.4.1.211.1.21.1.150.11.0' PARSE_ALERT_SEVERITY_MAP = { '1.3.6.1.4.1.211.4.1.1.126.1.150.0.5': constants.Severity.WARNING, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.2': constants.Severity.FATAL, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.3': constants.Severity.WARNING, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.9': constants.Severity.INFORMATIONAL, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.12': constants.Severity.INFORMATIONAL, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.50': constants.Severity.MINOR, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.51': constants.Severity.WARNING, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.60': constants.Severity.MINOR, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.61': constants.Severity.MINOR, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.62': constants.Severity.MINOR, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.64': constants.Severity.WARNING, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.65': constants.Severity.WARNING, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.66': constants.Severity.INFORMATIONAL, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.67': constants.Severity.MINOR, '1.3.6.1.4.1.211.4.1.1.126.1.150.0.68': constants.Severity.MINOR } # list_storage_hosts GET_HOST_WWN_NAMES = 'show host-wwn-names' GET_HOST_PATH_STATUS = 'show host-path-state' GET_HOST_ISCSI_NAMES = 'show host-iscsi-names' GET_HOST_ISCSI_NAMES_NUMBER = 'show host-iscsi-names -host-number {}' GET_HOST_SAS_ADDRESSES = 'show host-sas-addresses' HOST_PATH_STATUS_SPECIFIC_ONE = '----' HOST_PATH_STATUS_SPECIFIC_TWO = 'Online' HOST_ID_COUNT = 0 HOST_NAME_COUNT = 1 HOST_WWN_COUNT = 2 HOST_TYPE_COUNT = 4 HOST_FC_ENCAPSULATE_DATA_TOTAL = 5 HOST_PATH_STATUS_NAME = 2 HOST_PATH_STATUS = 3 HOST_PATH_STATUS_TOTAL = 4 HOST_ISCSI_NAMES_ZERO = 0 HOST_ISCSI_ONE = 1 HOST_ISCSI_THREE = 3 HOST_ISCSI_FOUR = 4 HOST_ISCSI_NAMES_TWO = 2 HOST_ISCSI_DETAIL_EIGHTEEN = 18 HOST_ISCSI_NAMES_SEVEN = 7 HOST_ISCSI_SPECIFIC_ONE = '*(' HOST_SAS_ZERO = 0 HOST_SAS_NAME = 1 HOST_SAS_ADDRESS = 2 HOST_SAS_OS = 4 HOST_SAS_ENCAPSULATE_DATA_TOTAL = 5 # list_storage_host_groups GET_HOST_GROUPS_ALL = 'show host-groups -all' HOST_GROUPS_SPECIFIC_ONE = '' HOST_GROUPS_SPECIFIC_TWO = '----' HOST_GROUP_ZERO = 0 HOST_GROUP_ONE = 1 HOST_GROUP_TOTAL = 2 # list_volume_groups GET_LUN_GROUPS = 'show lun-groups' LUN_GROUPS_SPECIFIC_TWO = '----' GET_LUN_GROUPS_LG_NUMBER = 'show lun-groups -lg-number {}' 
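# The '{}' placeholder above is filled with a LUN group number at call
# time, e.g. GET_LUN_GROUPS_LG_NUMBER.format(0) yields
# 'show lun-groups -lg-number 0' (see EternusDriver.get_lun_group_details).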
LUN_GROUPS_ID_COUNT = 0 LUN_GROUPS_NAME_COUNT = 1 LUN_VOLUME_ID = 1 LUN_VOLUME_LENGTH = 3 # list_masking_views GET_HOST_AFFINITY = 'show host-affinity' GET_PORT_GROUPS = 'show port-groups -all' GET_MAPPING = 'show mapping' PORT_GROUP_ARR_LENGTH = 2 PORT_GROUP_ID_NUM = 0 PORT_GROUP_NAME_NUM = 1 HOST_NAME_NUM = 1 HOST_GROUP_ID_NUM = 2 LUN_GROUP_ID_NUM = 4 LIST_MASKING_VIEWS_VOLUME_ID = 1 PORT_GROUP_ROW_ARR_NUM = 0 PORT_LIST_ROW_ARR_NUM = 1 VIEWS_GROUP_NUM_ZERO = 0 VIEWS_GROUP_ROW_KEY_LENGTH = 4 VIEWS_HOST_ROW_KEY_LENGTH = 3 VIEWS_GROUP_ROW_VALUE_LENGTH = 7 LIST_MASKING_VIEWS_SPECIFIC_ONE = '---' LIST_MASKING_VIEWS_SPECIFIC_TWO = '' LIST_MASKING_VIEWS_SPECIFIC_FOUR = '' LIST_MASKING_VIEWS_SPECIFIC_FIVE = 'CM#' LIST_MASKING_VIEWS_SPECIFIC_SIX = ' (Host' LIST_MASKING_VIEWS_SPECIFIC_SEVEN = 'LUN Volume' VIEWS_REGULAR_SPECIFIC_ONE = '^Port Group' VIEWS_REGULAR_SPECIFIC_TWO = '^Host' LIST_MASKING_VIEWS_CONSTANT_ZERO = 0 LIST_MASKING_VIEWS_CONSTANT_TWO = 2 HOST_OS_TYPES_MAP = { 'linux': constants.HostOSTypes.LINUX, 'windows': constants.HostOSTypes.WINDOWS, 'solaris': constants.HostOSTypes.SOLARIS, 'solaris mpxio': constants.HostOSTypes.SOLARIS, 'hp-ux': constants.HostOSTypes.HP_UX, 'aix': constants.HostOSTypes.AIX, 'aix vxvm': constants.HostOSTypes.AIX, 'xenserver': constants.HostOSTypes.XEN_SERVER, 'vmware esx': constants.HostOSTypes.VMWARE_ESX, 'linux_vis': constants.HostOSTypes.LINUX_VIS, 'windows server 2012': constants.HostOSTypes.WINDOWS_SERVER_2012, 'oracle vm': constants.HostOSTypes.ORACLE_VM, 'open vms': constants.HostOSTypes.OPEN_VMS, 'unknown': constants.HostOSTypes.UNKNOWN } ================================================ FILE: delfin/drivers/fujitsu/eternus/eternus_ssh_client.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2011 OpenStack LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
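# NOTE (illustrative sketch, not part of the original file): callers are
# expected to go through CliHandler, but the EternusSSHPool defined below
# can also be driven directly. Hypothetical usage, assuming the standard
# delfin SSH access keyword arguments (host/port/username/password):
#
#     pool = EternusSSHPool(**access_info)
#     # Runs the commands in one interactive shell, sleeping `exe_time`
#     # seconds for output to arrive, and returns the accumulated text.
#     output = pool.do_exec_shell(['show status'], exe_time=0.5)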
import time import paramiko import six from oslo_log import log as logging from cryptography.hazmat.primitives.asymmetric import dsa from delfin import cryptor from delfin import exception, utils from delfin.drivers.utils.ssh_client import SSHPool LOG = logging.getLogger(__name__) def override_check_dsa_parameters(parameters): if parameters.p.bit_length() not in [512, 1024, 2048, 3072, 4096]: raise ValueError( "p must be exactly 1024, 2048, 3072, or 4096 bits long" ) if parameters.q.bit_length() not in [160, 224, 256]: raise ValueError("q must be exactly 160, 224, or 256 bits long") if not (1 < parameters.g < parameters.p): raise ValueError("g, p don't satisfy 1 < g < p.") dsa._check_dsa_parameters = override_check_dsa_parameters class EternusSSHPool(SSHPool): def create(self): ssh = paramiko.SSHClient() try: if self.ssh_pub_key is None: ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) else: host_key = '%s %s %s' % \ (self.ssh_host, self.ssh_pub_key_type, self.ssh_pub_key) self.set_host_key(host_key, ssh) try: ssh.connect(hostname=self.ssh_host, port=self.ssh_port, username=self.ssh_username, password=cryptor.decode(self.ssh_password), timeout=self.ssh_conn_timeout) except Exception as e: if 'Authentication failed' in six.text_type(e): ssh.connect(hostname=self.ssh_host, port=self.ssh_port, username=self.ssh_username, password=cryptor.decode(self.ssh_password), timeout=self.ssh_conn_timeout, look_for_keys=False) else: raise e if self.conn_timeout: transport = ssh.get_transport() transport.set_keepalive(self.SOCKET_TIMEOUT) return ssh except Exception as e: err = six.text_type(e) LOG.error(err) if 'timed out' in err: raise exception.InvalidIpOrPort() elif 'No authentication methods available' in err \ or 'Authentication failed' in err: raise exception.InvalidUsernameOrPassword() elif 'not a valid RSA private key file' in err: raise exception.InvalidPrivateKey() elif 'not found in known_hosts' in err: raise exception.SSHNotFoundKnownHosts(self.ssh_host) else: raise exception.SSHException(err) def do_exec_shell(self, command_list, exe_time): result = '' try: with self.item() as ssh: if command_list and ssh: channel = ssh.invoke_shell() for command in command_list: utils.check_ssh_injection(command) channel.send(command + '\r\n') time.sleep(exe_time) channel.send("exit" + "\r\n") channel.close() while True: resp = channel.recv(9999).decode('utf8') if not resp: time.sleep(exe_time) break result += resp if 'is not a recognized command' in result \ or 'Unknown command' in result: raise exception.StorageBackendException(result) except paramiko.AuthenticationException as ae: LOG.error('doexec Authentication error:{}'.format(ae)) raise exception.InvalidUsernameOrPassword() except Exception as e: err = six.text_type(e) LOG.error(err) if 'timed out' in err \ or 'SSH connect timeout' in err: raise exception.SSHConnectTimeout() elif 'No authentication methods available' in err \ or 'Authentication failed' in err \ or 'Invalid username or password' in err: raise exception.InvalidUsernameOrPassword() elif 'not a valid RSA private key file' in err \ or 'not a valid RSA private key' in err: raise exception.InvalidPrivateKey() elif 'Unable to connect to port' in err \ or 'Invalid ip or port' in err: raise exception.InvalidIpOrPort() else: raise exception.SSHException(err) return result ================================================ FILE: delfin/drivers/fujitsu/eternus/eternus_stor.py ================================================ # Copyright 2022 The SODA Authors. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import hashlib
import re

import six
from oslo_log import log
from oslo_utils import units

from delfin import exception, utils
from delfin.common import constants
from delfin.drivers import driver
from delfin.drivers.fujitsu.eternus import cli_handler, consts
from delfin.drivers.fujitsu.eternus.consts import DIGITAL_CONSTANT
from delfin.drivers.utils.tools import Tools
from delfin.i18n import _

LOG = log.getLogger(__name__)


class EternusDriver(driver.StorageDriver):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.cli_handler = cli_handler.CliHandler(**kwargs)
        self.login = self.cli_handler.login()

    def list_volumes(self, context):
        list_volumes = self.get_volumes_model()
        if not list_volumes:
            list_volumes = self.get_volumes_old()
        return list_volumes

    def get_volumes_model(self):
        list_volumes = []
        volumes_str = self.cli_handler.exec_command(
            consts.GET_LIST_VOLUMES_MODE_UID)
        # query both thin-provisioned (TPV) and flexible-tier (FTV) volumes
        # for their type and used capacity
        volume_id_dict = self.cli_handler.get_volumes_type(
            command=consts.GET_LIST_VOLUMES_TYPE_TPV)
        volume_id_dict = self.cli_handler.get_volumes_type(
            volume_id_dict, consts.GET_LIST_VOLUMES_TYPE_FTV)
        block = True
        if volumes_str:
            volumes_arr = volumes_str.replace('\r', '').split('\n')
            for volumes_row_str in volumes_arr:
                if not volumes_row_str or \
                        consts.CLI_STR in volumes_row_str:
                    continue
                if consts.SPECIAL_CHARACTERS_TWO in volumes_row_str:
                    block = False
                    continue
                if block:
                    continue
                volumes_row_arr = volumes_row_str.split()
                volume_id = volumes_row_arr[
                    consts.GET_VOLUMES_MODEL_VOLUME_ID_COUNT]
                type_capacity = volume_id_dict.get(volume_id, {})
                volume_type = type_capacity.get(
                    'type', constants.VolumeType.THICK)
                used_capacity = type_capacity.get(
                    'used_capacity', DIGITAL_CONSTANT.ZERO_INT)
                volume_name = volumes_row_arr[
                    consts.GET_VOLUMES_MODEL_VOLUME_NAME_COUNT]
                volume_status = volumes_row_arr[
                    consts.GET_VOLUMES_MODEL_VOLUME_STATUS_COUNT]
                pool_id = volumes_row_arr[
                    consts.GET_VOLUMES_MODEL_POOL_ID_COUNT]
                total_capacity = \
                    int(volumes_row_arr[consts.
GET_VOLUMES_MODEL_TOTAL_CAPACITY_COUNT]) * units.Mi wwn = volumes_row_arr[consts.GET_VOLUMES_MODEL_WWN_COUNT] volume = { 'name': volume_name, 'storage_id': self.storage_id, 'status': consts.LIST_VOLUMES_STATUS_MAP.get( volume_status), 'native_volume_id': volume_id, 'native_storage_pool_id': pool_id, 'type': volume_type, 'wwn': wwn, 'total_capacity': total_capacity, 'used_capacity': used_capacity, 'free_capacity': total_capacity - used_capacity } list_volumes.append(volume) return list_volumes def get_volumes_old(self): list_volumes = [] volumes_str = self.cli_handler.exec_command(consts.GET_LIST_VOLUMES) volumes_arr = volumes_str.split('\n') if len(volumes_arr) < consts.VOLUMES_LENGTH: return list_volumes for volumes_num in range(consts.VOLUMES_CYCLE, len(volumes_arr)): volumes_row_str = volumes_arr[volumes_num] if not volumes_row_str or \ consts.CLI_STR in volumes_row_str.strip(): continue volumes_row_arr = volumes_row_str.split() volume_id = volumes_row_arr[consts.VOLUME_ID_COUNT] volume_name = volumes_row_arr[consts.VOLUME_NAME_COUNT] volume_status = volumes_row_arr[consts.VOLUME_STATUS_COUNT] volume_type = volumes_row_arr[consts.VOLUME_TYPE_COUNT] pool_id = volumes_row_arr[consts.NATIVE_STORAGE_POOL_ID_COUNT] total_capacity = volumes_row_arr[consts.TOTAL_CAPACITY_COUNT] volume_results = { 'name': volume_name, 'storage_id': self.storage_id, 'status': consts.LIST_VOLUMES_STATUS_MAP.get( volume_status), 'native_volume_id': volume_id, 'native_storage_pool_id': pool_id, 'type': constants.VolumeType.THIN if volume_type and consts.VOLUME_TYPE_OPEN in volume_type else constants.VolumeType.THICK, 'total_capacity': int(total_capacity) * units.Mi, 'used_capacity': consts.DEFAULT_USED_CAPACITY, 'free_capacity': consts.DEFAULT_FREE_CAPACITY } list_volumes.append(volume_results) return list_volumes def add_trap_config(self, context, trap_config): pass def clear_alert(self, context, alert): pass def get_storage(self, context): storage_name_dict = self.cli_handler.common_data_encapsulation( consts.GET_STORAGE_NAME) storage_name = storage_name_dict.get('Name') storage_description = storage_name_dict.get('Description') storage_location = storage_name_dict.get('Installation Site') enclosure_status = self.cli_handler.common_data_encapsulation( consts.GET_ENCLOSURE_STATUS) storage_model = enclosure_status.get('Model Name') storage_serial_number = enclosure_status.get('Serial Number') storage_firmware_version = enclosure_status.get('Firmware Version') storage_status_dict = self.cli_handler.common_data_encapsulation( consts.GET_STORAGE_STATUS) storage_status = consts.STORAGE_STATUS_MAP.get( storage_status_dict.get('Summary Status')) raw_capacity = consts.DIGITAL_CONSTANT.ZERO_INT list_disks = self.list_disks(context) if list_disks: for disks in list_disks: raw_capacity += disks.get('capacity', consts.DIGITAL_CONSTANT.ZERO_INT) total_capacity = consts.DIGITAL_CONSTANT.ZERO_INT used_capacity = consts.DIGITAL_CONSTANT.ZERO_INT free_capacity = consts.DIGITAL_CONSTANT.ZERO_INT list_storage_pools = self.list_storage_pools(context) if list_storage_pools: for pools in list_storage_pools: total_capacity += pools.get('total_capacity') used_capacity += pools.get('used_capacity') free_capacity += pools.get('free_capacity') storage = { 'name': storage_name, 'vendor': consts.GET_STORAGE_VENDOR, 'description': storage_description, 'model': storage_model, 'status': storage_status, 'serial_number': storage_serial_number, 'firmware_version': storage_firmware_version, 'location': storage_location, 'raw_capacity': raw_capacity, 
'total_capacity': total_capacity, 'used_capacity': used_capacity, 'free_capacity': free_capacity } return storage def list_controllers(self, context): controllers = self.cli_handler.get_controllers() controllers_status = self.cli_handler.common_data_encapsulation( consts.GET_STORAGE_CONTROLLER_STATUS) controller_list = [] for controller in (controllers or []): name = controller.get('name') status = constants.ControllerStatus.FAULT if controllers_status and controllers_status.get(name): status_value = controllers_status.get(name) if status_value and \ consts.CONTROLLER_STATUS_NORMAL_KEY in status_value: status = constants.ControllerStatus.NORMAL controller_model = { 'name': controller.get('name'), 'storage_id': self.storage_id, 'native_controller_id': controller.get('Serial Number'), 'status': status, 'location': controller.get('name'), 'soft_version': controller.get('Hard Revision'), 'cpu_info': controller.get('CPU Clock'), 'cpu_count': consts.DIGITAL_CONSTANT.ONE_INT, 'memory_size': str(int( Tools.get_capacity_size(controller.get('Memory Size')))) } controller_list.append(controller_model) return controller_list def list_disks(self, context): try: disk_list = \ self.cli_handler.format_data( consts.GET_DISK_COMMAND, self.storage_id, self.cli_handler.format_disks, False) return disk_list except Exception as e: error = six.text_type(e) LOG.error("Failed to get disk from fujitsu eternus %s" % error) raise exception.InvalidResults(error) def list_ports(self, context): port_list = self.cli_handler.format_data( consts.GET_PORT_FC_PARAMETERS, self.storage_id, self.cli_handler.format_fc_ports, True) ports_status = self.cli_handler.get_ports_status() for port in port_list: name = port.get('name') status_dict = ports_status.get(name, {}) if status_dict: link_status = status_dict.get('Link Status') connection_status = constants.PortConnectionStatus.UNKNOWN if 'Gbit/s' in link_status: reality = link_status.split()[0].replace('Gbit/s', '') speed = int(reality) * units.G port['speed'] = speed if 'Link Up' in link_status: connection_status = \ constants.PortConnectionStatus.CONNECTED if 'Link Down' in link_status: connection_status = \ constants.PortConnectionStatus.DISCONNECTED status_keys = status_dict.keys() status_dicts = {} for status_key in status_keys: if status_key and 'Status/Status Code' in status_key: status_dicts['Status/Status Code'] = status_dict.get( status_key) if status_key and status_key in 'Port WWN': status_dicts['WWN'] = status_dict.get(status_key) status = status_dicts.get('Status/Status Code') health_status = constants.PortHealthStatus.UNKNOWN if 'Normal' in status or 'normal' in status: health_status = constants.PortHealthStatus.NORMAL elif 'Unconnected' in status or 'unconnected' in status: health_status = constants.PortHealthStatus.UNKNOWN elif 'Error' in status or 'error' in status: health_status = constants.PortHealthStatus.ABNORMAL port['connection_status'] = connection_status port['wwn'] = status_dicts.get('WWN') port['health_status'] = health_status return port_list def list_storage_pools(self, context): pool_list = self.get_list_pools() if not pool_list: pool_list = self.get_list_pools_old(pool_list) return pool_list def get_list_pools_old(self, pool_list): pools_str = self.cli_handler.exec_command(consts.GET_STORAGE_POOL) if not pools_str: return pool_list pools_row_str = pools_str.split('\n') if len(pools_row_str) < consts.POOL_LENGTH: return pool_list for pools_row_num in range(consts.POOL_CYCLE, len(pools_row_str)): pools_row_arr = pools_row_str[pools_row_num].strip() 
if pools_row_arr in consts.CLI_STR or \ pools_row_arr in consts.SPECIAL_CHARACTERS_ONE: continue pools_arr = pools_row_arr.split() pool_id = pools_arr[consts.POOL_ID_COUNT] pool_name = pools_arr[consts.POOL_NAME_COUNT] pool_status = consts.STORAGE_POOL_STATUS_MAP.get( pools_arr[consts.POOL_STATUS_COUNT], constants.StoragePoolStatus.UNKNOWN) try: total_capacity = int( pools_arr[consts.POOL_TOTAL_CAPACITY_COUNT]) * units.Mi free_capacity = int( pools_arr[consts.POOL_FREE_CAPACITY_COUNT]) * units.Mi except Exception as e: LOG.info('Conversion digital exception:%s' % six.text_type(e)) return pool_list pool_model = { 'name': pool_name, 'storage_id': self.storage_id, 'native_storage_pool_id': str(pool_id), 'status': pool_status, 'storage_type': constants.StorageType.BLOCK, 'total_capacity': total_capacity, 'used_capacity': total_capacity - free_capacity, 'free_capacity': free_capacity } pool_list.append(pool_model) return pool_list def get_list_pools(self): pool_list = [] pools = self.cli_handler.get_volumes_or_pool( consts.GET_STORAGE_POOL_CSV, consts.POOL_TITLE_PATTERN) for pool in (pools or []): free_cap = float( pool.get("freecapacity(mb)")) * units.Mi total_cap = float( pool.get("totalcapacity(mb)")) * units.Mi used_cap = total_cap - free_cap status = consts.STORAGE_POOL_STATUS_MAP.get( pool.get('status'), constants.StoragePoolStatus.UNKNOWN) pool_model = { 'name': pool.get('raidgroupname'), 'storage_id': self.storage_id, 'native_storage_pool_id': str(pool.get('raidgroupno.')), 'status': status, 'storage_type': constants.StorageType.BLOCK, 'total_capacity': int(total_cap), 'used_capacity': int(used_cap), 'free_capacity': int(free_cap) } pool_list.append(pool_model) return pool_list def remove_trap_config(self, context, trap_config): pass def reset_connection(self, context, **kwargs): pass def list_alerts(self, context, query_para=None): list_alert = self.cli_handler.get_alerts( consts.SHOW_EVENTS_SEVERITY_WARNING, query_para) list_alert = self.cli_handler.get_alerts( consts.SHOW_EVENTS_SEVERITY_ERROR, query_para, list_alert) if not list_alert: list_alert = self.cli_handler.get_alerts( consts.SHOW_EVENTS_LEVEL_WARNING, query_para) list_alert = self.cli_handler.get_alerts( consts.SHOW_EVENTS_LEVEL_ERROR, query_para, list_alert) return list_alert @staticmethod def parse_alert(context, alert): try: if consts.PARSE_ALERT_DESCRIPTION in alert.keys(): alert_model = dict() alert_model['alert_id'] = alert.get( consts.PARSE_ALERT_ALERT_ID) alert_model['severity'] = consts.PARSE_ALERT_SEVERITY_MAP.get( alert.get(consts.PARSE_ALERT_SEVERITY), constants.Severity.NOT_SPECIFIED) alert_model['category'] = constants.Category.FAULT alert_model['occur_time'] = utils.utcnow_ms() alert_model['description'] = alert.get( consts.PARSE_ALERT_DESCRIPTION) alert_model['location'] = '{}{}'.format(alert.get( consts.PARSE_ALERT_LOCATION), alert.get(consts.PARSE_ALERT_COMPONENT)) alert_model['type'] = constants.EventType.EQUIPMENT_ALARM alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE alert_model['alert_name'] = alert.get( consts.PARSE_ALERT_DESCRIPTION) alert_model['match_key'] = hashlib.md5(str(alert.get( consts.PARSE_ALERT_ALERT_ID)).encode()).hexdigest() return alert_model except Exception as e: LOG.error(e) msg = (_("Failed to build alert model as some attributes missing")) raise exception.InvalidResults(msg) @staticmethod def get_access_url(): return 'https://{ip}' def list_storage_host_initiators(self, ctx): initiator_list = [] host_status = self.get_host_status() 
self.get_fc_sas_initiator(host_status, initiator_list, consts.GET_HOST_WWN_NAMES, consts.HOST_FC_ENCAPSULATE_DATA_TOTAL, consts.HOST_NAME_COUNT, consts.HOST_WWN_COUNT, constants.InitiatorType.FC) self.get_iscsi_initiator(host_status, initiator_list) self.get_fc_sas_initiator(host_status, initiator_list, consts.GET_HOST_SAS_ADDRESSES, consts.HOST_SAS_ENCAPSULATE_DATA_TOTAL, consts.HOST_SAS_NAME, consts.HOST_SAS_ADDRESS, constants.InitiatorType.SAS) return initiator_list def get_fc_sas_initiator(self, host_status, initiator_list, command, encapsulate_data_total, name_count, wwn_count, initiator_type): host_fc_list = self.get_data(command) for host_fc in host_fc_list: if len(host_fc) < encapsulate_data_total: continue fc_name = host_fc[name_count] fc_wwn = host_fc[wwn_count] state = host_status.get(fc_name) initiator_item = self.initiator_dict( fc_wwn, fc_name, state, initiator_type) initiator_list.append(initiator_item) def initiator_dict(self, wwn, host_id, state, initiator_type): status = constants.InitiatorStatus.OFFLINE if state is not None and state == consts.HOST_PATH_STATUS_SPECIFIC_TWO: status = constants.InitiatorStatus.ONLINE initiator_item = { "name": wwn, "storage_id": self.storage_id, "native_storage_host_initiator_id": wwn, "wwn": wwn, "status": status, "native_storage_host_id": host_id, 'type': initiator_type } return initiator_item def get_iscsi_initiator(self, host_status, initiator_list): host_iscsi_list = self.get_iscsi_host_data() for host_iscsi in host_iscsi_list: iscsi_name = host_iscsi.get('name') state = host_status.get(iscsi_name) iqn = host_iscsi.get('iqn') initiator_item = self.initiator_dict( iqn, iscsi_name, state, constants.InitiatorType.ISCSI) initiator_item['alias'] = host_iscsi.get('alias') initiator_list.append(initiator_item) def list_storage_hosts(self, ctx): host_list = [] host_status = self.get_host_status() self.get_fc_sas_host(host_list, host_status, consts.GET_HOST_WWN_NAMES, consts.HOST_FC_ENCAPSULATE_DATA_TOTAL, consts.HOST_NAME_COUNT, consts.HOST_TYPE_COUNT) self.get_iscsi_host(host_list, host_status) self.get_fc_sas_host(host_list, host_status, consts.GET_HOST_SAS_ADDRESSES, consts.HOST_SAS_ENCAPSULATE_DATA_TOTAL, consts.HOST_SAS_NAME, consts.HOST_SAS_OS) return host_list def get_fc_sas_host(self, host_list, host_status, command, encapsulate_data_total, name_count, type_count): host_fc_list = self.get_data(command) for host_fc in host_fc_list: if len(host_fc) < encapsulate_data_total: continue fc_name = host_fc[name_count] os = host_fc[type_count].lower() state = host_status.get(fc_name) status = constants.HostStatus.OFFLINE if state is not None and state == \ consts.HOST_PATH_STATUS_SPECIFIC_TWO: status = constants.HostStatus.NORMAL host_d = { "name": fc_name, "storage_id": self.storage_id, "native_storage_host_id": fc_name, "os_type": consts.HOST_OS_TYPES_MAP.get( os, constants.HostOSTypes.UNKNOWN), "status": status } host_list.append(host_d) def get_iscsi_host(self, host_list, host_status): host_iscsi_list = self.get_iscsi_host_data() for host_iscsi in host_iscsi_list: iscsi_name = host_iscsi.get('name') state = host_status.get(iscsi_name) os = host_iscsi.get('os') os = os.lower() if os else None status = constants.HostStatus.OFFLINE if state is not None and state ==\ consts.HOST_PATH_STATUS_SPECIFIC_TWO: status = constants.HostStatus.NORMAL host_d = { "name": iscsi_name, "storage_id": self.storage_id, "native_storage_host_id": iscsi_name, "os_type": consts.HOST_OS_TYPES_MAP.get( os, constants.HostOSTypes.UNKNOWN), "status": status, 
'ip_address': host_iscsi.get('address') } host_list.append(host_d) def get_data(self, command): host_list = [] host_str = self.cli_handler.exec_command(command) block = True length_list = [] if host_str: host_arr = host_str.strip().replace('\r', '').split('\n') for host_row_str in host_arr: if not host_row_str or \ consts.CLI_STR in host_row_str: continue if consts.SPECIAL_CHARACTERS_TWO in host_row_str: length_list.extend( [len(identify) for identify in host_row_str.split()]) block = False continue if block: continue volume_list = [] key_length = DIGITAL_CONSTANT.ZERO_INT for length_key in length_list: volume = host_row_str[key_length: key_length + length_key].strip() volume_list.append(volume) key_length =\ key_length + length_key + DIGITAL_CONSTANT.ONE_INT host_list.append(volume_list) return host_list def get_iscsi_host_data(self): iscsi_list = [] iscsi_ids_str = self.cli_handler.exec_command( consts.GET_HOST_ISCSI_NAMES) block = True if iscsi_ids_str: iscsi_ids_arr = iscsi_ids_str.strip().replace('\r', '').split('\n') for iscsi_ids_row_str in iscsi_ids_arr: if not iscsi_ids_row_str or \ consts.CLI_STR in iscsi_ids_row_str: continue if consts.HOST_PATH_STATUS_SPECIFIC_ONE in iscsi_ids_row_str: block = False continue if block: continue iscsi_ids_row_arr = iscsi_ids_row_str.strip().split() if len(iscsi_ids_row_arr) < consts.HOST_ISCSI_NAMES_SEVEN: continue details = self.get_iscsi_details( iscsi_ids_row_arr[consts.HOST_ISCSI_NAMES_ZERO]) iscsi_d = { 'iscsi_id': details.get('Host No.'), 'name': details.get('Host Name'), 'iqn': details.get('iSCSI Name'), 'alias': details.get('Alias Name'), 'address': None if consts.HOST_ISCSI_SPECIFIC_ONE in details.get('IP Address') else details.get('IP Address'), 'os': details.get('Host Response Name') } iscsi_list.append(iscsi_d) return iscsi_list def get_iscsi_details(self, number): details = {} iscsi_details_str = self.cli_handler.exec_command( consts.GET_HOST_ISCSI_NAMES_NUMBER.format(number)) if iscsi_details_str: iscsi_ids_arr = iscsi_details_str.strip().replace('\r', '') \ .split('\n') for row_str in iscsi_ids_arr: if not row_str or consts.CLI_STR in row_str: continue iscsi_details_row_arr = row_str.strip().split(' ') if len(iscsi_details_row_arr) < consts.HOST_ISCSI_NAMES_TWO: continue key = row_str[:consts.HOST_ISCSI_DETAIL_EIGHTEEN].strip() value = row_str[consts.HOST_ISCSI_DETAIL_EIGHTEEN:].strip() details[key] = value return details def get_host_status(self): status_d = {} status_list = self.get_data(consts.GET_HOST_PATH_STATUS) for status_row in status_list: if len(status_row) < consts.HOST_PATH_STATUS_TOTAL: continue host_name = status_row[consts.HOST_PATH_STATUS_NAME] path_state = status_row[consts.HOST_PATH_STATUS] status_d[host_name] = path_state return status_d def list_storage_host_groups(self, ctx): host_group_list = [] host_group_all = self.cli_handler.exec_command( consts.GET_HOST_GROUPS_ALL) if host_group_all: host_group_all_arr = host_group_all.replace('\r', '').split('\n\n') for host_group_str in host_group_all_arr: host_group_arr = host_group_str.split( consts.HOST_GROUPS_SPECIFIC_ONE) host_group_row_arr = host_group_arr[ consts.HOST_GROUP_ZERO].strip().split('\n') host_group_id = None host_group_name = None block = True for host_group_row_str in host_group_row_arr: if not host_group_row_str or \ consts.CLI_STR in host_group_row_str: continue if consts.HOST_GROUPS_SPECIFIC_TWO in host_group_row_str: block = False continue if block: continue host_group = host_group_row_str.split() host_group_id = 
host_group[consts.HOST_GROUP_ZERO] host_group_name = host_group[consts.HOST_GROUP_ONE] storage_hosts = self.get_storage_hosts(host_group_arr) host_g = { 'name': host_group_name, 'storage_id': self.storage_id, 'native_storage_host_group_id': host_group_id, 'storage_hosts': storage_hosts, } host_group_list.append(host_g) storage_host_grp_relation_list = [] for storage_host_group in host_group_list: storage_hosts = storage_host_group.pop('storage_hosts', None) if not storage_hosts: continue storage_hosts = storage_hosts.split(',') for storage_host in storage_hosts: storage_host_group_relation = { 'storage_id': self.storage_id, 'native_storage_host_group_id': storage_host_group.get( 'native_storage_host_group_id'), 'native_storage_host_id': storage_host } storage_host_grp_relation_list \ .append(storage_host_group_relation) result = { 'storage_host_groups': host_group_list, 'storage_host_grp_host_rels': storage_host_grp_relation_list } return result @staticmethod def get_storage_hosts(host_group_arr): storage_hosts = None if len(host_group_arr) == consts.HOST_GROUP_TOTAL: host_row_arr = host_group_arr[consts.HOST_GROUP_ONE].split('\n') block = True for host_row_str in host_row_arr: if not host_row_str or consts.CLI_STR in host_row_str: continue if consts.HOST_GROUPS_SPECIFIC_TWO in host_row_str: block = False continue if block: continue host_arr = host_row_str.split() host_id = host_arr[consts.HOST_GROUP_ONE] if storage_hosts: storage_hosts = "{0},{1}".format(storage_hosts, host_id) else: storage_hosts = "{0}".format(host_id) return storage_hosts def list_volume_groups(self, ctx): vol_group_list = [] storage_id = self.storage_id lun_groups_list = self.get_data(consts.GET_LUN_GROUPS) for lun in lun_groups_list: lun_groups_id = lun[consts.LUN_GROUPS_ID_COUNT] lun_groups_name = lun[consts.LUN_GROUPS_NAME_COUNT] volumes_str = self.get_lun_group_details(lun_groups_id) vol_g = { 'name': lun_groups_name, 'storage_id': storage_id, 'native_volume_group_id': lun_groups_id, 'volumes': volumes_str } vol_group_list.append(vol_g) vol_grp_vol_relation_list = [] for vol_group in vol_group_list: volumes = vol_group.pop('volumes', None) if not volumes: continue for volume_id in volumes.split(','): storage_volume_group_relation = { 'storage_id': storage_id, 'native_volume_group_id': vol_group.get( 'native_volume_group_id'), 'native_volume_id': volume_id } vol_grp_vol_relation_list \ .append(storage_volume_group_relation) result = { 'volume_groups': vol_group_list, 'vol_grp_vol_rels': vol_grp_vol_relation_list } return result def get_lun_group_details(self, lun_groups_id): lun_group_details_str = self.cli_handler.exec_command( consts.GET_LUN_GROUPS_LG_NUMBER.format(lun_groups_id)) volumes_str = None if lun_group_details_str: lun_group_details_arr = lun_group_details_str.strip( ).replace('\r', '').split('\n') block = True for lun_details_row_str in lun_group_details_arr: if not lun_details_row_str or \ consts.CLI_STR in lun_details_row_str: continue if consts.LUN_GROUPS_SPECIFIC_TWO in lun_details_row_str: block = False continue if block: continue lun_details_arr = lun_details_row_str.strip().split() volume_id = lun_details_arr[consts.LUN_VOLUME_ID] if volumes_str: volumes_str = "{0},{1}".format(volumes_str, volume_id) else: volumes_str = "{0}".format(volume_id) return volumes_str def list_port_groups(self, ctx): port_group_list = [] storage_id = self.storage_id port_groups_str = self.cli_handler.exec_command(consts.GET_PORT_GROUPS) if port_groups_str: port_groups_arr = port_groups_str.strip().replace('\r', 
'').split( '\n\n') for port_group_str in port_groups_arr: port_group_arr = port_group_str.split( consts.LIST_MASKING_VIEWS_SPECIFIC_TWO) port_g_row_arr = port_group_arr[ consts.PORT_GROUP_ROW_ARR_NUM].split('\n') port_group_id = None port_group_name = None block = True for port_g_row_str in port_g_row_arr: if not port_g_row_str or \ consts.CLI_STR in port_g_row_str: continue if consts.LIST_MASKING_VIEWS_SPECIFIC_ONE \ in port_g_row_str: block = False continue if block: continue port_group = port_g_row_str.strip().split() port_group_id = port_group[consts.PORT_GROUP_ID_NUM] port_group_name = port_group[consts.PORT_GROUP_NAME_NUM] break ports_str = None if len(port_group_arr) == consts.PORT_GROUP_ARR_LENGTH: port_list_row_arr = port_group_arr[ consts.PORT_LIST_ROW_ARR_NUM].strip().split('\n') for port in port_list_row_arr: port_id = port.strip() if port_id in consts.CLI_STR: continue if ports_str: ports_str = "{0},{1}".format(ports_str, port_id) else: ports_str = "{0}".format(port_id) port_g = { 'name': port_group_name, 'storage_id': storage_id, 'native_port_group_id': port_group_id, 'ports': ports_str } port_group_list.append(port_g) port_grp_port_relation_list = [] for port_group in port_group_list: ports = port_group.pop('ports', None) if not ports: continue ports = ports.split(',') for ports_id in ports: port_groups_relation = { 'storage_id': storage_id, 'native_port_group_id': port_group.get( 'native_port_group_id'), 'native_port_id': ports_id } port_grp_port_relation_list \ .append(port_groups_relation) result = { 'port_groups': port_group_list, 'port_grp_port_rels': port_grp_port_relation_list } return result def list_masking_views(self, ctx): list_masking_views = [] view_id_dict = {} views_str = self.cli_handler.exec_command(consts.GET_HOST_AFFINITY) if views_str: views_arr = views_str.strip().replace('\r', '').split('\n\n') for views_group_str in views_arr: if consts.LIST_MASKING_VIEWS_SPECIFIC_FOUR \ in views_group_str: self.get_host_group_views( view_id_dict, list_masking_views, views_group_str) else: self.get_host_views(list_masking_views, views_group_str, view_id_dict) return list_masking_views def get_host_views(self, list_masking_views, views_group_str, view_id_dict): views_row_arr = views_group_str.strip().split('\n') block = True key = [] port_id = None for views_row_str in views_row_arr: if not views_row_str or \ consts.CLI_STR in views_row_str: continue if consts.LIST_MASKING_VIEWS_SPECIFIC_FIVE in views_row_str: port_id = views_row_str.split( consts.LIST_MASKING_VIEWS_SPECIFIC_SIX)[ consts.LIST_MASKING_VIEWS_CONSTANT_ZERO] self.get_group_key(views_row_str, consts.VIEWS_REGULAR_SPECIFIC_TWO, key) if consts.LIST_MASKING_VIEWS_SPECIFIC_ONE in views_row_str: block = False continue if block: continue if len(key) != consts.VIEWS_HOST_ROW_KEY_LENGTH: continue views_arr = views_row_str.strip().split() volume_group_id = views_arr[consts.LIST_MASKING_VIEWS_CONSTANT_TWO] host_name = views_arr[consts.HOST_NAME_NUM] view_id = '{}{}{}{}'.format( 'host_group_id', volume_group_id, host_name, 'volume_id') if view_id_dict.get(view_id): continue view_id_dict[view_id] = view_id view = { 'native_masking_view_id': view_id, 'name': view_id, 'native_storage_host_id': host_name, 'native_volume_group_id': volume_group_id, 'native_port_id': port_id, 'storage_id': self.storage_id, } list_masking_views.append(view) def get_host_group_views(self, view_id_dict, list_masking_views, views_group_str): views_group_arr = views_group_str.strip().split( consts.LIST_MASKING_VIEWS_SPECIFIC_FOUR) 
views_group_row_arr = views_group_arr[ consts.VIEWS_GROUP_NUM_ZERO].strip().split('\n') block = True group_key = [] for views_group_row in views_group_row_arr: if not views_group_row or \ consts.CLI_STR in views_group_row: continue self.get_group_key(views_group_row, consts.VIEWS_REGULAR_SPECIFIC_ONE, group_key) if consts.LIST_MASKING_VIEWS_SPECIFIC_ONE in views_group_row: block = False continue if block: continue views_row_arr = views_group_row.strip().split() if len(views_row_arr) != consts.VIEWS_GROUP_ROW_VALUE_LENGTH \ or len(group_key) != consts.VIEWS_GROUP_ROW_KEY_LENGTH: continue host_group_id = views_row_arr[consts.HOST_GROUP_ID_NUM] volume_group_id = views_row_arr[consts.LUN_GROUP_ID_NUM] view_id = '{}{}{}{}'.format(host_group_id, volume_group_id, 'host_id', 'volume_id') if view_id_dict.get(view_id): continue view_id_dict[view_id] = view_id view = { 'native_masking_view_id': view_id, 'name': view_id, 'native_storage_host_group_id': host_group_id, 'native_port_group_id': views_row_arr[ consts.PORT_GROUP_ID_NUM], 'native_volume_group_id': volume_group_id, 'storage_id': self.storage_id, } list_masking_views.append(view) @staticmethod def get_group_key(views_group_row, regular_str, key): title_pattern = re.compile(regular_str) title_search_obj = title_pattern.search(views_group_row) if title_search_obj: views_row_arr = views_group_row.strip().split(' ') for views in views_row_arr: if views: key.append(views.strip()) return key ================================================ FILE: delfin/drivers/h3c/__init__.py ================================================ ================================================ FILE: delfin/drivers/h3c/unistor_cf/__init__.py ================================================ ================================================ FILE: delfin/drivers/h3c/unistor_cf/unistor_cf.py ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin.drivers.hpe.hpe_3par.hpe_3parstor import Hpe3parStorDriver class H3cUniStorCfDriver(Hpe3parStorDriver): def get_storage(self, context): storage_info = super().get_storage(context) storage_info['vendor'] = 'H3C' return storage_info ================================================ FILE: delfin/drivers/helper.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
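# NOTE (illustrative, not part of the original file): the helpers below are
# called around driver registration and sync. A rough, hypothetical flow:
#
#     storage_new = driver.get_storage(context)
#     # first registration: reject an already-registered serial number
#     check_storage_repetition(context, storage_new)
#     # later syncs: the backend must still report the same serial number
#     check_storage_consistency(context, storage_id, storage_new)
#     # credentials are encrypted before access_info is persisted
#     encrypt_password(context, access_info)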
from oslo_log import log from delfin import cryptor from delfin import db from delfin import exception from delfin.common import constants from delfin.i18n import _ LOG = log.getLogger(__name__) def encrypt_password(context, access_info): for access in constants.ACCESS_TYPE: if access_info.get(access): access_info[access]['password'] = cryptor.encode( access_info[access]['password']) def check_storage_repetition(context, storage): if not storage: raise exception.StorageBackendNotFound() if not storage.get('serial_number'): msg = _("Serial number should be provided by storage.") raise exception.InvalidResults(msg) filters = dict(serial_number=storage['serial_number']) storage_list = db.storage_get_all(context, filters=filters) if storage_list: msg = (_("Failed to register storage. Reason: same serial number: " "%s detected.") % storage['serial_number']) LOG.error(msg) raise exception.StorageAlreadyExists() def check_storage_consistency(context, storage_id, storage_new): """Check storage response returned by driver whether it matches the storage stored in database. :param context: The context of delfin. :type context: delfin.context.RequestContext :param storage_id: The uuid of storage in database. :type storage_id: string :param storage_new: The storage response returned by driver. :type storage_new: dict """ if not storage_new: raise exception.StorageBackendNotFound() if not storage_new.get('serial_number'): msg = _("Serial number should be provided by storage.") raise exception.InvalidResults(msg) storage_present = db.storage_get(context, storage_id) if storage_new['serial_number'] != storage_present['serial_number']: msg = (_("Serial number %s does not match " "the existing storage serial number %s.") % (storage_new['serial_number'], storage_present['serial_number'])) raise exception.StorageSerialNumberMismatch(msg) ================================================ FILE: delfin/drivers/hitachi/__init__.py ================================================ ================================================ FILE: delfin/drivers/hitachi/hnas/__init__.py ================================================ ================================================ FILE: delfin/drivers/hitachi/hnas/constants.py ================================================ # Copyright 2021 The SODA Authors. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
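# NOTE (illustrative, not part of the original file): the *_INDEX maps
# defined below record column positions in HNAS CLI table output, so the
# parsing code in nas_handler.py can write, say,
# row[FS_INDEX['status_index']] instead of a bare magic number when it
# splits a 'filesystem-list' row on whitespace.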
import re from delfin.common import constants DATA_HEAD_PATTERN = re.compile('[-]{3,}') ALERT_HEAD_PATTERN = re.compile('[*]{3,}') STORAGE_VENDOR = 'Hitachi' TIME_TYPE = '%Y-%m-%d %H:%M:%S' OID_TRAP_DATA = '1.3.6.1.4.1.11096.6.1.1' STORAGE_INFO_COMMAND = "cluster-show" STORAGE_MODEL_COMMAND = "ver" LOCATION_COMMAND = 'system-information-get' DISK_INFO_COMMAND = "sd-list --scsi" POOL_INFO_COMMAND = "span-list" POOL_SIZE_COMMAND = "span-space-distribution" CONTROLLER_INFO_COMMAND = "cluster-show -y" ALERT_INFO_COMMAND = "event-log-show -w -s -x" ALERT_TIME = " --from '%s'" ALERT_FORMAT_TIME = "%Y-%m-%d %H:%M:%S" FC_PORT_COMMAND = "fc-hports" FC_SPEED_COMMAND = "fc-link-speed" ETH_PORT_COMMAND = "ifconfig" FS_INFO_COMMAND = 'df -k' FS_STATUS_COMMAND = 'filesystem-list' CHECK_EVS = 'evs-select %s' QUOTA_INFO_COMMAND = "quota list %s" TREE_INFO_COMMAND = 'virtual-volume list --verbose %s' CIFS_SHARE_COMMAND = 'cifs-share list' NFS_SHARE_COMMAND = "nfs-export list" CLUSTER_STATUS = { 'Robust': constants.StorageStatus.NORMAL, 'Degraded': constants.StorageStatus.DEGRADED, 'Critical': constants.StorageStatus.ABNORMAL, 'OK': constants.StorageStatus.NORMAL, 'Failure(s)': constants.StorageStatus.ABNORMAL } SEVERITY_MAP = { 'Severe': constants.Severity.FATAL, 'Warning': constants.Severity.WARNING, 'Information': constants.Severity.INFORMATIONAL } FS_STATUS_MAP = { 'Fail!': constants.FilesystemStatus.FAULTY, 'OK': constants.FilesystemStatus.NORMAL, 'NoEVS': constants.FilesystemStatus.NORMAL, 'EVS-D': constants.FilesystemStatus.NORMAL, 'Hiddn': constants.FilesystemStatus.NORMAL, 'Clust': constants.FilesystemStatus.FAULTY, 'Unavl': constants.FilesystemStatus.NORMAL, 'Check': constants.FilesystemStatus.NORMAL, 'Fixng': constants.FilesystemStatus.NORMAL, 'Mount': constants.FilesystemStatus.NORMAL, 'MntRO': constants.FilesystemStatus.NORMAL, 'SysLk': constants.FilesystemStatus.NORMAL, 'SysRO': constants.FilesystemStatus.NORMAL, 'RepTg': constants.FilesystemStatus.NORMAL, 'Rcvry': constants.FilesystemStatus.NORMAL, 'UnMnt': constants.FilesystemStatus.FAULTY, 'Mntg': constants.FilesystemStatus.NORMAL, 'Formt': constants.FilesystemStatus.NORMAL, 'Failg': constants.FilesystemStatus.FAULTY, None: constants.FilesystemStatus.NORMAL, } FS_INDEX = { 'status_len': 6, 'id_index': 1, 'pool_index': 2, 'status_index': 3, 'detail_len': 8, 'total_index': 3, 'used_index': 4, 'free_index': 7, 'type_index': 8, } ETH_INDEX = { 'name_len': 1, 'name_index': 0, 'status_len': 2, 'status_index': 0, 'ip_len': 2, 'ip_index': 1, 'mask_index': 3 } ALERT_INDEX = { 'alert_len': 4, 'table_head': 0, 'severity_index': 1, 'year_index': 2, 'time_index': 3, 'id_index': 0 } NODE_INDEX = { 'node_len': 2, 'status_index': 2, 'name_index': 1, 'id_index': 0 } POOL_INDEX = { 'pool_len': 6, 'total_index': 3, 'free_index': 0, 'status_index': 1, 'name_index': 0, } DISK_INDEX = { 'type_len': 2, 'model_index': 1, 'vendor_index': 0, 'version_index': 2 } ================================================ FILE: delfin/drivers/hitachi/hnas/hds_nas.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from delfin.drivers import driver
from delfin.drivers.hitachi.hnas import nas_handler


class HitachiHNasDriver(driver.StorageDriver):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.nas_handler = nas_handler.NasHandler(**kwargs)
        self.nas_handler.login()

    def reset_connection(self, context, **kwargs):
        self.nas_handler.login()

    def get_storage(self, context):
        return self.nas_handler.get_storage()

    def list_storage_pools(self, context):
        return self.nas_handler.get_pool(self.storage_id)

    def list_volumes(self, context):
        return []

    def list_controllers(self, context):
        return self.nas_handler.list_controllers(self.storage_id)

    def list_ports(self, context):
        return self.nas_handler.list_ports(self.storage_id)

    def list_disks(self, context):
        return self.nas_handler.get_disk(self.storage_id)

    def list_alerts(self, context, query_para=None):
        return self.nas_handler.list_alerts(query_para)

    def list_qtrees(self, context):
        return self.nas_handler.list_qtrees(self.storage_id)

    def list_quotas(self, context):
        return self.nas_handler.list_quotas(self.storage_id)

    def list_filesystems(self, context):
        return self.nas_handler.list_filesystems(self.storage_id)

    def list_shares(self, context):
        return self.nas_handler.list_shares(self.storage_id)

    def add_trap_config(self, context, trap_config):
        pass

    def remove_trap_config(self, context, trap_config):
        pass

    @staticmethod
    def parse_alert(context, alert):
        return nas_handler.NasHandler.parse_alert(alert)

    def clear_alert(self, context, alert):
        pass

    @staticmethod
    def get_access_url():
        return 'https://{ip}'


================================================
FILE: delfin/drivers/hitachi/hnas/nas_handler.py
================================================
# Copyright 2021 The SODA Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
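# NasHandler drives the HNAS admin CLI over a pooled SSH (SSC) session and
# parses its text output into delfin models. As an illustration of the
# key/value parser below (sample CLI output invented for the example),
# format_data_to_map("Model: HNAS 4060\r\nSoftware: 12.7\r\n\r\n", 'Model')
# returns [{'Model': 'HNAS4060', 'Software': '12.7'}]; spaces inside values
# are stripped, and a blank line closes one record.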
import hashlib
import time

import eventlet
import six
from oslo_log import log as logging
from oslo_utils import units

from delfin import exception, utils
from delfin.common import constants
from delfin.drivers.utils import ssh_client
from delfin.drivers.hitachi.hnas import constants as constant
from delfin.drivers.utils.tools import Tools

LOG = logging.getLogger(__name__)


class NasHandler(object):

    def __init__(self, **kwargs):
        self.ssh_pool = ssh_client.SSHPool(**kwargs)
        self.evs_list = []

    def ssh_do_exec(self, command_list):
        res = None
        with eventlet.Timeout(60, False):
            res = self.ssh_pool.do_exec_shell(command_list)
            while 'Failed to establish SSC connection' in res:
                res = self.ssh_pool.do_exec_shell(command_list)
        if res:
            return res
        else:
            raise exception.ConnectTimeout(
                'Failed to establish SSC connection from hitachi hnas')

    def login(self):
        try:
            result = self.ssh_do_exec(['cluster-show -y'])
            if 'EVS' not in result:
                raise exception.InvalidIpOrPort()
        except Exception as e:
            LOG.error("Failed to login hnas: %s" % (six.text_type(e)))
            raise e

    @staticmethod
    def format_data_to_map(value_info, value_key, line='\r\n',
                           split=":", split_key=None):
        map_list = []
        detail_array = value_info.split(line)
        value_map = {}
        for detail in detail_array:
            if detail:
                string_info = detail.split(split)
                key = string_info[0].replace(' ', '')
                value = ''
                if len(string_info) > 1:
                    for string in string_info[1:]:
                        value += string.replace('""', '') \
                            .replace('\'', '').replace(' ', '')
                # a duplicate key gets a '1' suffix (e.g. 'Limit' -> 'Limit1')
                if value_map.get(key):
                    value_map[key + '1'] = value
                else:
                    value_map[key] = value
            else:
                if value_key in value_map:
                    map_list.append(value_map)
                value_map = {}
            if split_key and split_key in detail:
                if value_key in value_map:
                    map_list.append(value_map)
                value_map = {}
        if value_key in value_map:
            map_list.append(value_map)
        return map_list

    @staticmethod
    def get_table_data(values, is_alert=False):
        header_index = 0
        table = values.split('\r\n')
        for i in range(len(table)):
            if constant.DATA_HEAD_PATTERN.search(table[i]):
                header_index = i
            if is_alert and constant.ALERT_HEAD_PATTERN.search(table[i]):
                header_index = i
        return table[(header_index + 1):]

    def format_storage_info(self, storage_map_list, model_map_list,
                            version_map_list, location_map_list,
                            serial_map_list):
        if not storage_map_list:
            raise exception.StorageBackendException(
                'Failed to get HNAS storage')
        model_map = model_map_list[-1] if model_map_list else {}
        model = model_map.get('Model')
        model = model.replace('HNAS', 'HNAS ')
        version_map = version_map_list[-1] if version_map_list else {}
        location_map = location_map_list[-1] if location_map_list else {}
        serial_map = serial_map_list[-1] if serial_map_list else {}
        version = version_map.get("Software").split('(')
        serial_number = serial_map.get("Hardware").split('(')[-1]
        storage_map = storage_map_list[-1]
        disk_list = self.get_disk(None)
        total_capacity = raw_capacity = used_capacity = free_capacity = 0
        for disk in disk_list:
            raw_capacity += disk['capacity']
        status = constant.CLUSTER_STATUS.get(storage_map['ClusterHealth'])
        pool_list = self.get_pool(None)
        for pool in pool_list:
            total_capacity += pool['total_capacity']
            used_capacity += pool['used_capacity']
            free_capacity += pool['free_capacity']
        storage_model = {
            "name": storage_map['ClusterName'],
            "vendor": constant.STORAGE_VENDOR,
            "model": model,
            "status": status,
            "serial_number": serial_number.replace(')', ''),
            "firmware_version": version[0],
            "location": location_map['Location'],
            "total_capacity": total_capacity,
            "raw_capacity": raw_capacity,
            "used_capacity": used_capacity,
            "free_capacity": free_capacity
        }
        return storage_model

    def get_storage(self):
        try:
            storage_info = self.ssh_do_exec([constant.STORAGE_INFO_COMMAND])
            model_info = self.ssh_do_exec([constant.STORAGE_MODEL_COMMAND])
            location_info = self.ssh_do_exec([constant.LOCATION_COMMAND])
            model_map_list = self.format_data_to_map(model_info, 'Model')
            storage_map_list = self.format_data_to_map(
                storage_info, 'ClusterName', split="=")
            version_map_list = self.format_data_to_map(model_info, 'Software')
            location_map_list = self.format_data_to_map(
                location_info, 'Location')
            serial_map_list = self.format_data_to_map(model_info, 'Hardware')
            storage_model = self.format_storage_info(
                storage_map_list, model_map_list, version_map_list,
                location_map_list, serial_map_list)
            return storage_model
        except exception.DelfinException as e:
            err_msg = "Failed to get storage from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_disk(self, storage_id):
        try:
            disk_info = self.ssh_do_exec([constant.DISK_INFO_COMMAND])
            disk_map_list = self.format_data_to_map(disk_info, 'Capacity')
            disks_list = []
            for disk_map in disk_map_list:
                if 'Status' in disk_map:
                    size = disk_map['Capacity'].split('GiB')[0] + "GB"
                    status = constants.DiskStatus.NORMAL \
                        if disk_map['Status'] == 'OK' \
                        else constants.DiskStatus.ABNORMAL
                    disk_type = disk_map['Type']
                    type_array = disk_type.split(';')
                    model = vendor = version = None
                    if len(type_array) > constant.DISK_INDEX['type_len']:
                        model = type_array[
                            constant.DISK_INDEX['model_index']].replace(
                            'Model', '')
                        vendor = type_array[
                            constant.DISK_INDEX['vendor_index']].replace(
                            'Make', '')
                        version = type_array[
                            constant.DISK_INDEX['version_index']].replace(
                            'Revision', '')
                    pool_id = disk_map.get('Usedinspan')
                    serial_number = disk_map['Luid'].split(']')[-1]
                    if pool_id:
                        pool_id = pool_id.split('(')[0]
                    disk_model = {
                        'name': disk_map['HDSdevname'],
                        'storage_id': storage_id,
                        'native_disk_id': disk_map['DeviceID'],
                        'serial_number': serial_number,
                        'manufacturer': vendor,
                        'model': model,
                        'firmware': version,
                        'capacity': int(Tools.get_capacity_size(size)),
                        'status': status,
                        'native_disk_group_id': pool_id
                    }
                    disks_list.append(disk_model)
            return disks_list
        except exception.DelfinException as e:
            err_msg = "Failed to get disk from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get disk from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_pool_size(self):
        size_info = self.ssh_do_exec([constant.POOL_SIZE_COMMAND])
        size_array = size_info.split('\r\n')
        size_map = {}
        pool_name = None
        for size in size_array:
            if 'Span ' in size:
                pool_name = size.split()[-1].replace(':', '')
                size_map[pool_name] = 0
            if '[Free space]' in size:
                free_array = size.split()
                if len(free_array) > 2:
                    free_size = free_array[0].replace('GiB', 'GB')
                    size_map[pool_name] += Tools.get_capacity_size(free_size)
        return size_map

    def get_pool(self, storage_id):
        try:
            pool_info = self.ssh_do_exec([constant.POOL_INFO_COMMAND])
            pool_list = []
            pool_array = self.get_table_data(pool_info)
            size_map = self.get_pool_size()
            for pool in pool_array:
                value_array = pool.split()
                if len(value_array) == constant.POOL_INDEX['pool_len']:
                    total_capacity = Tools.get_capacity_size(
                        value_array[constant.POOL_INDEX['total_index']]
                        + 'GB')
                    free_capacity = size_map.get(
                        value_array[constant.POOL_INDEX['free_index']],
                        total_capacity)
                    status = constants.StoragePoolStatus.NORMAL \
                        if value_array[
                            constant.POOL_INDEX['status_index']] == 'Yes' \
                        else constants.StoragePoolStatus.ABNORMAL
                    pool_model = {
                        'name':
                            value_array[constant.POOL_INDEX['name_index']],
                        'storage_id': storage_id,
                        'native_storage_pool_id': value_array[
                            constant.POOL_INDEX['name_index']],
                        'status': status,
                        'storage_type': constants.StorageType.FILE,
                        'total_capacity': total_capacity,
                        'used_capacity': total_capacity - free_capacity,
                        'free_capacity': free_capacity,
                    }
                    pool_list.append(pool_model)
            return pool_list
        except exception.DelfinException as e:
            err_msg = "Failed to get pool from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get pool from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_controllers(self, storage_id):
        try:
            controller_list = []
            node_info = self.ssh_do_exec([constant.CONTROLLER_INFO_COMMAND])
            nodes_array = self.get_table_data(node_info)
            for nodes in nodes_array:
                node = nodes.split()
                if len(node) > constant.NODE_INDEX['node_len']:
                    status = constants.ControllerStatus.NORMAL \
                        if node[
                            constant.NODE_INDEX['status_index']] == 'ONLINE' \
                        else constants.ControllerStatus.OFFLINE
                    controller_model = {
                        'name': node[constant.NODE_INDEX['name_index']],
                        'storage_id': storage_id,
                        'native_controller_id': node[
                            constant.NODE_INDEX['id_index']],
                        'status': status
                    }
                    controller_list.append(controller_model)
            return controller_list
        except exception.DelfinException as e:
            err_msg = "Failed to get controllers from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get controllers from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    @staticmethod
    def format_alert_list(alert_array, query_para):
        alert_list = []
        alert_model = {}
        for alert in alert_array:
            if alert and 'CAUSE' not in alert:
                alert_data = alert.split()
                if len(alert_data) > constant.ALERT_INDEX['alert_len'] \
                        and alert_data[
                            constant.ALERT_INDEX['severity_index']] \
                        in constant.SEVERITY_MAP:
                    occur_time = \
                        alert_data[constant.ALERT_INDEX['year_index']] \
                        + ' ' + alert_data[
                            constant.ALERT_INDEX['time_index']].split("+")[0]
                    occur_time = int(time.mktime(time.strptime(
                        occur_time, constant.TIME_TYPE))) * 1000
                    if not query_para or \
                            (int(query_para['begin_time'])
                             <= occur_time
                             <= int(query_para['end_time'])):
                        description = ''
                        for i in range(4, len(alert_data)):
                            description += alert_data[i] + ' '
                        severity = constant.SEVERITY_MAP.get(
                            alert_data[
                                constant.ALERT_INDEX['severity_index']])
                        alert_model['alert_id'] = \
                            alert_data[constant.ALERT_INDEX['id_index']]
                        alert_model['alert_name'] = \
                            alert_data[constant.ALERT_INDEX['id_index']]
                        alert_model['severity'] = severity
                        alert_model['category'] = constants.Category.FAULT
                        alert_model['type'] = \
                            constants.EventType.EQUIPMENT_ALARM
                        alert_model['occur_time'] = occur_time
                        alert_model['description'] = description.lstrip()
                        alert_model['match_key'] = hashlib.md5(
                            (alert_data[constant.ALERT_INDEX['id_index']]
                             + severity + description).encode()).hexdigest()
                        alert_model['resource_type'] = \
                            constants.DEFAULT_RESOURCE_TYPE
            if alert and alert_model and 'CAUSE' in alert:
                alert_data = alert.split(':')
                alert_model['location'] = alert_data[-1]
            if not alert:
                # a blank line ends one alert record; skip empty records
                if alert_model:
                    alert_list.append(alert_model)
                alert_model = {}
        return alert_list

    def list_alerts(self, query_para):
        try:
            command = constant.ALERT_INFO_COMMAND
            if query_para and 'begin_time' in query_para:
                time_array = time.gmtime(int(query_para['begin_time']) / 1000)
                begin_time = time.strftime("%Y-%m-%d %H:%M:%S", time_array)
                command += constant.ALERT_TIME % begin_time
            alert_info = self.ssh_do_exec([command])
            alert_array = self.get_table_data(alert_info, True)
            alert_list = self.format_alert_list(alert_array, query_para)
            alert_list = sorted(
                alert_list, key=lambda x: x['occur_time'], reverse=True)
            return alert_list
        except exception.DelfinException as e:
            err_msg = "Failed to get alerts from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get alerts from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    @staticmethod
    def parse_alert(alert):
        try:
            alert_info = alert.get(constant.OID_TRAP_DATA)
            alert_array = alert_info.split(':')
            if len(alert_array) > 1:
                description = alert_array[1]
                alert = alert_array[0].split()
                if len(alert) > 1:
                    alert_id = alert[0]
                    severity = constant.SEVERITY_MAP.get(alert[1])
                    if severity == constant.SEVERITY_MAP.get('Information'):
                        return
                    alert_model = {
                        'alert_id': alert_id,
                        'alert_name': alert_id,
                        'severity': severity,
                        'category': constants.Category.FAULT,
                        'type': constants.EventType.EQUIPMENT_ALARM,
                        'occur_time': utils.utcnow_ms(),
                        'description': description,
                        'match_key': hashlib.md5(
                            (alert_id + severity
                             + description).encode()).hexdigest(),
                        'resource_type': constants.DEFAULT_RESOURCE_TYPE,
                        'location': ''
                    }
                    return alert_model
        except exception.DelfinException as e:
            err_msg = "Failed to parse alert from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to parse alert from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_ports(self, storage_id):
        try:
            ports_list = self.get_fc_port(storage_id)
            return ports_list
        except exception.DelfinException as e:
            err_msg = "Failed to get ports from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get ports from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_fc_port(self, storage_id):
        try:
            fc_info = self.ssh_do_exec([constant.FC_PORT_COMMAND])
            fc_map_list = self.format_data_to_map(fc_info, 'Portname')
            fc_list = []
            speed_info = self.ssh_do_exec([constant.FC_SPEED_COMMAND])
            speed_map_list = self.format_data_to_map(speed_info, 'FC1')
            speed_map = speed_map_list[-1]
            for value_map in fc_map_list:
                if 'Portname' in value_map:
                    status = value_map.get('Status')
                    health = constants.PortHealthStatus.ABNORMAL
                    if status == 'Good':
                        health = constants.PortHealthStatus.NORMAL
                    connection_status = \
                        constants.PortConnectionStatus.DISCONNECTED
                    if 'FCLinkisup' in value_map:
                        connection_status = \
                            constants.PortConnectionStatus.CONNECTED
                    port_id = ''
                    for key in value_map.keys():
                        if 'HostPort' in key:
                            port_id = key.replace('HostPort', '')
                            break
                    speed = int(
                        speed_map.get('FC' + port_id).replace('Gbps', ''))
                    fc_model = {
                        'name': 'FC' + port_id,
                        'storage_id': storage_id,
                        'native_port_id': port_id,
                        'connection_status': connection_status,
                        'health_status': health,
                        'type': constants.PortType.FC,
                        'speed': speed * units.G,
                        'max_speed': 8 * units.G,
                        'wwn': value_map.get('Portname'),
                    }
                    fc_list.append(fc_model)
            return fc_list
        except exception.DelfinException as e:
            err_msg = "Failed to get fc ports from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get fc ports from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_filesystems(self, storage_id):
        try:
            fs_list = []
            fs_info = self.ssh_do_exec([constant.FS_INFO_COMMAND])
            fs_array = self.get_table_data(fs_info)
            status_info = self.ssh_do_exec([constant.FS_STATUS_COMMAND])
            status_array = self.get_table_data(status_info)
            status_map = {}
            for status in status_array:
                status_info = status.split()
                if len(status_info) > constant.FS_INDEX['status_len']:
                    status_map[status_info[constant.FS_INDEX['id_index']]] = \
                        [status_info[constant.FS_INDEX['pool_index']],
                         status_info[constant.FS_INDEX['status_index']]]
            for fs in fs_array:
                fs_info = list(filter(None, fs.split(' ')))
                if len(fs_info) > constant.FS_INDEX['detail_len']:
                    total_capacity = fs_info[
                        constant.FS_INDEX['total_index']].replace(' ', '')
                    used_capacity = fs_info[
                        constant.FS_INDEX['used_index']].replace(
                        ' ', '').split('(')[0]
                    free_capacity = fs_info[
                        constant.FS_INDEX['free_index']].replace(
                        ' ', '').split('(')[0]
                    total_capacity = Tools.get_capacity_size(total_capacity)
                    used_capacity = Tools.get_capacity_size(used_capacity)
                    free_capacity = Tools.get_capacity_size(free_capacity)
                    volume_type = constants.VolumeType.THICK \
                        if fs_info[constant.FS_INDEX['type_index']] == 'No' \
                        else constants.VolumeType.THIN
                    pool_id = status_map.get(fs_info[0])[0] \
                        if status_map.get(fs_info[0]) else None
                    status = status_map.get(fs_info[0])[1] \
                        if status_map.get(fs_info[0]) else None
                    fs_model = {
                        'name': fs_info[1],
                        'storage_id': storage_id,
                        'native_filesystem_id': fs_info[1],
                        'native_pool_id': pool_id,
                        'status': constant.FS_STATUS_MAP[status],
                        'type': volume_type,
                        'total_capacity': total_capacity,
                        'used_capacity': used_capacity,
                        'free_capacity': free_capacity
                    }
                    fs_list.append(fs_model)
            return fs_list
        except exception.DelfinException as e:
            err_msg = "Failed to get filesystem from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get filesystem from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_fs_evs(self):
        fs_info = self.ssh_do_exec([constant.FS_STATUS_COMMAND])
        fs_array = self.get_table_data(fs_info)
        evs_list = []
        for fs in fs_array:
            fs_info_array = fs.split()
            if len(fs_info_array) > 6:
                evs_list.append([fs_info_array[0], fs_info_array[4]])
        return evs_list

    def list_quotas(self, storage_id):
        try:
            evs_list = self.get_fs_evs()
            quota_list = []
            for evs in evs_list:
                quota_info = self.ssh_do_exec([
                    constant.CHECK_EVS % evs[1],
                    constant.QUOTA_INFO_COMMAND % evs[0]])
                quota_map_list = self.format_data_to_map(quota_info, 'Usage')
                for quota_map in quota_map_list:
                    quota_type = None
                    user_group_name = None
                    qtree_id = None
                    if 'Target' in quota_map:
                        if 'Group' in quota_map.get('Target'):
                            quota_type = constants.QuotaType.GROUP
                            user_group_name = \
                                quota_map.get('Target').replace('Group', '')
                        elif 'User' in quota_map.get('Target'):
                            quota_type = constants.QuotaType.USER
                            user_group_name = \
                                quota_map.get('Target').replace('User', '')
                        elif 'ViVol' in quota_map.get('Target'):
                            quota_type = constants.QuotaType.TREE
                            user_group_name = \
                                quota_map.get('Target').replace('ViVol', '')
                            qtree_id = evs[0] + '-' + user_group_name
                    quota_id = \
                        evs[0] + '-' + quota_type + '-' + user_group_name
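                    # The CLI prints two "Limit" rows per quota; the second
                    # is renamed 'Limit1' by format_data_to_map's
                    # duplicate-key handling and appears to carry the
                    # file-count quota. Each value has a '(Soft)' or
                    # '(Hard)' suffix that is stripped below.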
                    capacity_hard_limit, capacity_soft_limit = None, None
                    file_soft_limit, file_hard_limit = None, None
                    if 'Soft' in quota_map.get('Limit'):
                        capacity_soft_limit = \
                            quota_map.get('Limit').replace('(Soft)', '')
                    elif 'Hard' in quota_map.get('Limit'):
                        capacity_hard_limit = capacity_soft_limit = \
                            quota_map.get('Limit').replace('(Hard)', '')
                    if 'Soft' in quota_map.get('Limit1'):
                        file_soft_limit = \
                            quota_map.get('Limit1').replace('(Soft)', '')
                    elif 'Hard' in quota_map.get('Limit1'):
                        file_soft_limit = file_hard_limit = \
                            quota_map.get('Limit1').replace('(Hard)', '')
                    quota = {
                        'native_quota_id': quota_id,
                        'type': quota_type,
                        'storage_id': storage_id,
                        'native_filesystem_id': evs[0],
                        'native_qtree_id': qtree_id,
                        'capacity_hard_limit': capacity_hard_limit,
                        'capacity_soft_limit':
                            Tools.get_capacity_size(capacity_soft_limit),
                        'file_hard_limit': file_hard_limit,
                        'file_soft_limit': file_soft_limit,
                        'file_count': quota_map.get('FileCount'),
                        'used_capacity':
                            Tools.get_capacity_size(quota_map.get('Usage')),
                        'user_group_name': user_group_name
                    }
                    quota_list.append(quota)
            return quota_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage quota from " \
                      "hitachi nas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage quota from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_qtrees(self, storage_id):
        try:
            evs_list = self.get_fs_evs()
            return self.get_qtree(evs_list, storage_id)
        except exception.DelfinException as e:
            err_msg = "Failed to get storage qtree from " \
                      "hitachi nas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage qtree from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_qtree(self, evs_list, storage_id):
        qtree_list = []
        for evs in evs_list:
            tree_info = self.ssh_do_exec([
                constant.CHECK_EVS % evs[1],
                constant.TREE_INFO_COMMAND % evs[0]])
            tree_map_list = self.format_data_to_map(
                tree_info, 'root', split_key='last modified')
            for qt_map in tree_map_list:
                qt_name = ''
                for key in qt_map:
                    if qt_map[key] == '' and key != 'email':
                        qt_name = key
                qt_id = evs[0] + '-' + qt_name
                qt_model = {
                    'name': qt_name,
                    'storage_id': storage_id,
                    'native_qtree_id': qt_id,
                    'path': qt_map.get('root'),
                    'native_filesystem_id': evs[0],
                }
                qtree_list.append(qt_model)
        return qtree_list

    def get_cifs_share(self, evs_list, storage_id):
        share_list = []
        evs_array = []
        for evs in evs_list:
            if evs[1] not in evs_array:
                evs_array.append(evs[1])
        for evs in evs_array:
            cifs_share = self.ssh_do_exec([
                constant.CHECK_EVS % evs,
                constant.CIFS_SHARE_COMMAND])
            cifs_map_list = self.format_data_to_map(cifs_share, 'Sharename')
            for cifs in cifs_map_list:
                qtree_id = None
                if 'VirtualVolume' in cifs.get('Sharecomment'):
                    qtree = cifs.get('Sharecomment').split('Volume')
                    if cifs.get('Filesystemlabel'):
                        qtree_id = \
                            cifs.get('Filesystemlabel') + '-' + qtree[1]
                if cifs.get('Filesystemlabel'):
                    native_share_id = '%s-%s-%s' % (
                        cifs.get('Filesystemlabel'),
                        cifs.get('Sharename'),
                        constants.ShareProtocol.CIFS)
                else:
                    native_share_id = (cifs.get('Sharename') + '-'
                                       + constants.ShareProtocol.CIFS)
                share = {
                    'name': cifs.get('Sharename'),
                    'storage_id': storage_id,
                    'native_share_id': native_share_id,
                    'native_qtree_id': qtree_id,
                    'native_filesystem_id': cifs.get('Filesystemlabel'),
                    'path': cifs.get('Sharepath'),
                    'protocol': constants.ShareProtocol.CIFS
                }
                share_list.append(share)
        return share_list

    def get_nfs_share(self, evs_list, storage_id):
        share_list = []
        evs_array = []
        for evs in evs_list:
            if evs[1] not in evs_array:
                evs_array.append(evs[1])
        for evs in evs_array:
            nfs_share = self.ssh_do_exec([
                constant.CHECK_EVS % evs,
                constant.NFS_SHARE_COMMAND])
            nfs_map_list = self.format_data_to_map(nfs_share, 'Exportname')
            qtree_list = self.get_qtree(evs_list, None)
            for nfs in nfs_map_list:
                qtree_id = None
                for qtree in qtree_list:
                    if nfs.get('Exportpath') == qtree['path'] \
                            and qtree['native_filesystem_id'] \
                            == nfs.get('Filesystemlabel'):
                        qtree_id = qtree['native_qtree_id']
                if nfs.get('Filesystemlabel'):
                    native_share_id = (nfs.get('Filesystemlabel') + '-'
                                       + nfs.get('Exportname') + '-'
                                       + constants.ShareProtocol.NFS)
                else:
                    native_share_id = (nfs.get('Exportname') + '-'
                                       + constants.ShareProtocol.NFS)
                share = {
                    'name': nfs.get('Exportname'),
                    'storage_id': storage_id,
                    'native_share_id': native_share_id,
                    'native_qtree_id': qtree_id,
                    'native_filesystem_id': nfs.get('Filesystemlabel'),
                    'path': nfs.get('Exportpath'),
                    'protocol': constants.ShareProtocol.NFS
                }
                share_list.append(share)
        return share_list

    def list_shares(self, storage_id):
        try:
            evs_list = self.get_fs_evs()
            share_list = []
            share_list.extend(self.get_cifs_share(evs_list, storage_id))
            share_list.extend(self.get_nfs_share(evs_list, storage_id))
            return share_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage share from " \
                      "hitachi nas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage share from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)


================================================
FILE: delfin/drivers/hitachi/vsp/__init__.py
================================================

================================================
FILE: delfin/drivers/hitachi/vsp/consts.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

SOCKET_TIMEOUT = 180
ERROR_SESSION_INVALID_CODE = 403
ERROR_SESSION_IS_BEING_USED_CODE = 409
BLOCK_SIZE = 512
LDEV_NUMBER_OF_PER_REQUEST = 300
SUPPORTED_VSP_SERIES = ('VSP G350', 'VSP G370', 'VSP G700', 'VSP G900',
                        'VSP F350', 'VSP F370', 'VSP F700', 'VSP F900')
# The maximum number of volumes fetched in one request
MAX_VOLUME_NUMBER = 16384


================================================
FILE: delfin/drivers/hitachi/vsp/rest_handler.py
================================================
# Copyright 2021 The SODA Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import threading
import time

import requests
import six
from oslo_log import log as logging

from delfin import cryptor
from delfin import exception
from delfin.drivers.hitachi.vsp import consts
from delfin.drivers.utils.rest_client import RestClient

LOG = logging.getLogger(__name__)


class RestHandler(RestClient):
    COMM_URL = '/ConfigurationManager/v1/objects/storages'
    LOGOUT_URL = '/ConfigurationManager/v1/objects/sessions/'
    AUTH_KEY = 'Authorization'

    def __init__(self, **kwargs):
        super(RestHandler, self).__init__(**kwargs)
        self.session_lock = threading.Lock()
        self.session_id = None
        self.storage_device_id = None
        self.device_model = None
        self.serial_number = None

    def call(self, url, data=None, method=None,
             calltimeout=consts.SOCKET_TIMEOUT):
        try:
            res = self.call_with_token(url, data, method, calltimeout)
            if (res.status_code == consts.ERROR_SESSION_INVALID_CODE
                    or res.status_code ==
                    consts.ERROR_SESSION_IS_BEING_USED_CODE):
                LOG.error("Failed to get token=={0}=={1}, get token again"
                          .format(res.status_code, res.text))
                # if the method is logout, return immediately
                if method == 'DELETE' and RestHandler.LOGOUT_URL in url:
                    return res
                if self.get_token():
                    res = self.call_with_token(
                        url, data, method, calltimeout)
                else:
                    LOG.error('Login error, get access_session failed')
            elif res.status_code == 503:
                raise exception.InvalidResults(res.text)
            return res
        except Exception as e:
            err_msg = "RestHandler.call failed: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

    def call_with_token(self, url, data, method, calltimeout):
        auth_key = None
        if self.session:
            auth_key = self.session.headers.get(RestHandler.AUTH_KEY, None)
            if auth_key:
                self.session.headers[RestHandler.AUTH_KEY] = \
                    cryptor.decode(auth_key)
        res = self.do_call(url, data, method, calltimeout)
        if auth_key:
            self.session.headers[RestHandler.AUTH_KEY] = auth_key
        return res

    def get_rest_info(self, url, timeout=consts.SOCKET_TIMEOUT, data=None):
        result_json = None
        if self.session and url != RestHandler.COMM_URL:
            auth_key = self.session.headers.get(RestHandler.AUTH_KEY, None)
            if auth_key is None:
                self.get_token()
        res = self.call(url, data, 'GET', timeout)
        if res.status_code == 200:
            result_json = res.json()
        return result_json

    def get_token(self):
        try:
            succeed = False
            if self.san_address:
                url = '%s/%s/sessions' % (RestHandler.COMM_URL,
                                          self.storage_device_id)
                data = {}
                with self.session_lock:
                    if self.session is None:
                        self.init_http_head()
                    self.session.auth = requests.auth.HTTPBasicAuth(
                        self.rest_username,
                        cryptor.decode(self.rest_password))
                    res = self.call_with_token(url, data, 'POST', 30)
                    if res.status_code == 200:
                        succeed = True
                        result = res.json()
                        self.session_id = cryptor.encode(
                            result.get('sessionId'))
                        access_session = 'Session %s' % result.get('token')
                        self.session.headers[RestHandler.AUTH_KEY] = \
                            cryptor.encode(access_session)
                    else:
                        LOG.error("Login error. URL: %(url)s\n"
                                  "Reason: %(reason)s.",
                                  {"url": url, "reason": res.text})
                        if 'authentication failed' in res.text:
                            raise exception.InvalidUsernameOrPassword()
                        elif 'KART30005-E' in res.text:
                            raise exception.StorageBackendException(
                                six.text_type(res.text))
                        else:
                            raise exception.BadResponse(res.text)
            else:
                LOG.error('Token Parameter error')
            return succeed
        except Exception as e:
            LOG.error("Get token error: %s", six.text_type(e))
            raise e

    def login(self):
        try:
            self.get_device_id()
        except Exception as e:
            LOG.error("Login error: %s", six.text_type(e))
            raise e

    def logout(self):
        try:
            url = RestHandler.LOGOUT_URL
            if self.session_id is not None:
                url = '%s/%s/sessions/%s' % (
                    RestHandler.COMM_URL, self.storage_device_id,
                    cryptor.decode(self.session_id))
                if self.san_address:
                    self.call(url, method='DELETE')
                    url = None
                self.session_id = None
                self.storage_device_id = None
                self.device_model = None
                self.serial_number = None
                self.session = None
            else:
                LOG.error('logout error: session id not found')
        except Exception as err:
            LOG.error('logout error: {}'.format(err))
            raise exception.StorageBackendException(
                reason='Failed to logout from restful')

    def get_device_id(self):
        try:
            if self.session is None:
                self.init_http_head()
            storage_systems = self.get_system_info()
            system_info = storage_systems.get('data')
            for system in system_info:
                self.storage_device_id = system.get('storageDeviceId')
                self.device_model = system.get('model')
                self.serial_number = system.get('serialNumber')
                if system.get('svpIp'):
                    if system.get('svpIp') == self.rest_host:
                        self.storage_device_id = \
                            system.get('storageDeviceId')
                        self.device_model = system.get('model')
                        self.serial_number = system.get('serialNumber')
                        break
                elif system.get('ctl1Ip') == self.rest_host or \
                        system.get('ctl2Ip') == self.rest_host:
                    self.storage_device_id = system.get('storageDeviceId')
                    self.device_model = system.get('model')
                    self.serial_number = system.get('serialNumber')
                    break
            if self.storage_device_id is None:
                error_msg = f'Failed to get device id, ' \
                            f'system info: {storage_systems}'
                LOG.error(error_msg)
                raise exception.StorageBackendException(error_msg)
        except Exception as e:
            LOG.error("Get device id error: %s", six.text_type(e))
            raise e

    def get_firmware_version(self):
        url = '%s/%s' % (RestHandler.COMM_URL, self.storage_device_id)
        result_json = self.get_rest_info(url)
        if result_json is None:
            return None
        firmware_version = result_json.get('dkcMicroVersion')
        return firmware_version

    def get_capacity(self):
        url = '%s/%s/total-capacities/instance' % (
            RestHandler.COMM_URL, self.storage_device_id)
        result_json = self.get_rest_info(url)
        return result_json

    def get_all_pools(self):
        url = '%s/%s/pools' % (RestHandler.COMM_URL, self.storage_device_id)
        result_json = self.get_rest_info(url)
        return result_json

    def get_volumes(self, head_id,
                    max_number=consts.LDEV_NUMBER_OF_PER_REQUEST):
        url = '%s/%s/ldevs?headLdevId=%s&count=%s&ldevOption=defined' % (
            RestHandler.COMM_URL, self.storage_device_id, head_id,
            max_number)
        result_json = self.get_rest_info(url)
        return result_json

    def get_system_info(self):
        result_json = self.get_rest_info(RestHandler.COMM_URL, timeout=10)
        return result_json

    def get_controllers(self):
        url = '%s/%s/components/instance' % (
            RestHandler.COMM_URL, self.storage_device_id)
        result_json = self.get_rest_info(url)
        return result_json

    def get_disks(self):
        url = '%s/%s/drives' % (RestHandler.COMM_URL, self.storage_device_id)
        result_json = self.get_rest_info(url)
        return result_json

    def get_all_ports(self):
        url = '%s/%s/ports' % (RestHandler.COMM_URL, self.storage_device_id)
        result_json = self.get_rest_info(url)
        return result_json

    def get_detail_ports(self, port_id):
        url = '%s/%s/ports/%s' % (
            RestHandler.COMM_URL, self.storage_device_id, port_id)
        result_json = self.get_rest_info(url)
        return result_json

    def get_alerts(self, param, start, end):
        url = '%s/%s/alerts?%s&start=%s&count=%s' % (
            RestHandler.COMM_URL, self.storage_device_id, param, start, end)
        result_json = self.get_rest_info(url)
        return result_json

    def get_all_host_groups(self):
        url = '%s/%s/host-groups' % (
            RestHandler.COMM_URL, self.storage_device_id)
        result_json = self.get_rest_info(url)
        return result_json

    def get_specific_host_group(self, port_id):
        url = '%s/%s/host-groups?portId=%s' % (
            RestHandler.COMM_URL, self.storage_device_id, port_id)
        result_json = self.get_rest_info(url)
        return result_json

    def get_host_wwn(self, port_id, group_number):
        url = '%s/%s/host-wwns?portId=%s&hostGroupNumber=%s' % (
            RestHandler.COMM_URL, self.storage_device_id, port_id,
            group_number)
        result_json = self.get_rest_info(url)
        return result_json

    def get_iscsi_name(self, port_id, group_number):
        url = '%s/%s/host-iscsis?portId=%s&hostGroupNumber=%s' % (
            RestHandler.COMM_URL, self.storage_device_id, port_id,
            group_number)
        result_json = self.get_rest_info(url)
        return result_json

    def get_lun_path(self, port_id, group_number):
        url = '%s/%s/luns?portId=%s&hostGroupNumber=%s&' \
              'isBasicLunInformation=true' % (
                  RestHandler.COMM_URL, self.storage_device_id, port_id,
                  group_number)
        result_json = self.get_rest_info(url)
        return result_json

    def get_volumes_with_defined(self):
        url = '%s/%s/ldevs?ldevOption=defined&count=%s' % (
            RestHandler.COMM_URL, self.storage_device_id,
            consts.MAX_VOLUME_NUMBER)
        LOG.info('get volume start time: %s' % time.time())
        result_json = self.get_rest_info(url, timeout=None)
        LOG.info('get volume end time: %s' % time.time())
        return result_json


================================================
FILE: delfin/drivers/hitachi/vsp/vsp_stor.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
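# HitachiVspDriver below maps Hitachi Configuration Manager REST payloads
# onto delfin models. LDEV ids are rendered in the colon-separated hex form
# the arrays use, e.g. to_vsp_lun_id_format(291) == '00:01:23' (0x000123).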
import hashlib
import time

import six
from oslo_log import log
from oslo_utils import units

from delfin import exception
from delfin.common import alert_util
from delfin.common import constants
from delfin.drivers import driver
from delfin.drivers.hitachi.vsp import consts
from delfin.drivers.hitachi.vsp import rest_handler

LOG = log.getLogger(__name__)


class HitachiVspDriver(driver.StorageDriver):
    POOL_STATUS_MAP = {"POLN": constants.StoragePoolStatus.NORMAL,
                       "POLF": constants.StoragePoolStatus.NORMAL,
                       "POLS": constants.StoragePoolStatus.ABNORMAL,
                       "POLE": constants.StoragePoolStatus.OFFLINE
                       }
    ALERT_LEVEL_MAP = {"Acute": constants.Severity.CRITICAL,
                       "Serious": constants.Severity.MAJOR,
                       "Moderate": constants.Severity.WARNING,
                       "Service": constants.Severity.INFORMATIONAL
                       }
    TRAP_ALERT_LEVEL_MAP = {
        "1.3.6.1.4.1.116.3.11.4.1.1.0.1": constants.Severity.CRITICAL,
        "1.3.6.1.4.1.116.3.11.4.1.1.0.2": constants.Severity.MAJOR,
        "1.3.6.1.4.1.116.3.11.4.1.1.0.3": constants.Severity.WARNING,
        "1.3.6.1.4.1.116.3.11.4.1.1.0.4": constants.Severity.INFORMATIONAL
    }
    DISK_LOGIC_TYPE_MAP = {"DATA": constants.DiskLogicalType.MEMBER,
                           "SPARE": constants.DiskLogicalType.SPARE,
                           "FREE": constants.DiskLogicalType.FREE
                           }
    DISK_PHYSICAL_TYPE_MAP = {"SAS": constants.DiskPhysicalType.SAS,
                              "SATA": constants.DiskPhysicalType.SATA,
                              "SSD": constants.DiskPhysicalType.SSD,
                              "FC": constants.DiskPhysicalType.FC
                              }
    PORT_TYPE_MAP = {"FIBRE": constants.PortType.FC,
                     "SCSI": constants.PortType.OTHER,
                     "ISCSI": constants.PortType.ETH,
                     "ENAS": constants.PortType.OTHER,
                     "ESCON": constants.PortType.OTHER,
                     "FICON": constants.PortType.FICON,
                     "FCoE": constants.PortType.FCOE,
                     "HNASS": constants.PortType.OTHER,
                     "HNASU": constants.PortType.OTHER
                     }
    OS_TYPE_MAP = {"HP-UX": constants.HostOSTypes.HP_UX,
                   "SOLARIS": constants.HostOSTypes.SOLARIS,
                   "AIX": constants.HostOSTypes.AIX,
                   "WIN": constants.HostOSTypes.WINDOWS,
                   "LINUX/IRIX": constants.HostOSTypes.LINUX,
                   "TRU64": constants.HostOSTypes.UNKNOWN,
                   "OVMS": constants.HostOSTypes.OPEN_VMS,
                   "NETWARE": constants.HostOSTypes.UNKNOWN,
                   "VMWARE": constants.HostOSTypes.VMWARE_ESX,
                   "VMWARE_EX": constants.HostOSTypes.VMWARE_ESX,
                   "WIN_EX": constants.HostOSTypes.WINDOWS
                   }
    DISK_STATUS_TYPE = {"NML": constants.DiskStatus.NORMAL,
                        "CPY": constants.DiskStatus.NORMAL,
                        "CPI": constants.DiskStatus.NORMAL,
                        "RSV": constants.DiskStatus.NORMAL,
                        "FAI": constants.DiskStatus.ABNORMAL,
                        "BLK": constants.DiskStatus.ABNORMAL,
                        "WAR": constants.DiskStatus.ABNORMAL,
                        "UNK": constants.DiskStatus.NORMAL,
                        "Unknown": constants.DiskStatus.NORMAL
                        }
    TIME_PATTERN = '%Y-%m-%dT%H:%M:%S'
    AUTO_PORT_SPEED = 8 * units.Gi
    REFCODE_OID = '1.3.6.1.4.1.116.5.11.4.2.3'
    DESC_OID = '1.3.6.1.4.1.116.5.11.4.2.7'
    TRAP_TIME_OID = '1.3.6.1.4.1.116.5.11.4.2.6'
    TRAP_DATE_OID = '1.3.6.1.4.1.116.5.11.4.2.5'
    TRAP_NICKNAME_OID = '1.3.6.1.4.1.116.5.11.4.2.2'
    OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'
    SECONDS_TO_MS = 1000
    ALERT_START = 1
    CTL_ALERT_COUNT = 255
    DKC_ALERT_COUNT = 10239

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.rest_handler = rest_handler.RestHandler(**kwargs)
        self.rest_handler.login()

    def reset_connection(self, context, **kwargs):
        self.rest_handler.logout()
        self.rest_handler.verify = kwargs.get('verify', False)
        self.rest_handler.login()

    def close_connection(self):
        self.rest_handler.logout()

    def get_storage(self, context):
        self.rest_handler.get_device_id()
        if self.rest_handler.device_model in consts.SUPPORTED_VSP_SERIES:
            capacity_json = self.rest_handler.get_capacity()
            free_capacity = \
                capacity_json.get("total").get("freeSpace") * units.Ki
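            # the total-capacities endpoint appears to report KiB counts,
            # hence the units.Ki scaling to bytes for freeSpace above and
            # totalCapacity below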
total_capacity = \ capacity_json.get("total").get("totalCapacity") * units.Ki else: free_capacity = 0 total_capacity = 0 pools_info = self.rest_handler.get_all_pools() if pools_info is not None: pools = pools_info.get('data') for pool in pools: total_cap = \ int(pool.get( 'totalPoolCapacity')) * units.Mi free_cap = int( pool.get( 'availableVolumeCapacity')) * units.Mi free_capacity = free_capacity + free_cap total_capacity = total_capacity + total_cap firmware_version = self.rest_handler.get_firmware_version() status = constants.StorageStatus.OFFLINE if firmware_version is not None: status = constants.StorageStatus.NORMAL system_name = '%s_%s' % (self.rest_handler.device_model, self.rest_handler.rest_host) s = { 'name': system_name, 'vendor': 'Hitachi', 'description': 'Hitachi VSP Storage', 'model': str(self.rest_handler.device_model), 'status': status, 'serial_number': str(self.rest_handler.serial_number), 'firmware_version': str(firmware_version), 'location': '', 'raw_capacity': int(total_capacity), 'total_capacity': int(total_capacity), 'used_capacity': int(total_capacity - free_capacity), 'free_capacity': int(free_capacity) } return s def list_storage_pools(self, context): try: pools_info = self.rest_handler.get_all_pools() pool_list = [] pools = pools_info.get('data') for pool in pools: status = self.POOL_STATUS_MAP.get( pool.get('poolStatus'), constants.StoragePoolStatus.ABNORMAL ) storage_type = constants.StorageType.BLOCK total_cap = \ int(pool.get('totalPoolCapacity')) * units.Mi free_cap = int( pool.get('availableVolumeCapacity')) * units.Mi used_cap = total_cap - free_cap p = { 'name': pool.get('poolName'), 'storage_id': self.storage_id, 'native_storage_pool_id': str(pool.get('poolId')), 'description': 'Hitachi VSP Pool', 'status': status, 'storage_type': storage_type, 'total_capacity': int(total_cap), 'used_capacity': int(used_cap), 'free_capacity': int(free_cap), } pool_list.append(p) return pool_list except exception.DelfinException as err: err_msg = "Failed to get pool metrics from hitachi vsp: %s" % \ (six.text_type(err)) LOG.error(err_msg) raise err except Exception as e: err_msg = "Failed to get pool metrics from hitachi vsp: %s" % \ (six.text_type(e)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) @staticmethod def to_vsp_lun_id_format(lun_id): hex_str = hex(lun_id) result = '' hex_lun_id = hex_str[2::].rjust(6, '0') is_first = True for i in range(0, len(hex_lun_id), 2): if is_first is True: result = '%s' % (hex_lun_id[i:i + 2]) is_first = False else: result = '%s:%s' % (result, hex_lun_id[i:i + 2]) return result def list_volumes(self, context): try: volume_list = [] volumes = self.rest_handler.get_volumes_with_defined() if not volumes: return volume_list volume_list = self.parse_volumes(volumes) if len(volumes.get('data')) >= consts.MAX_VOLUME_NUMBER: head_id = volumes.get('data')[-1].get('ldevId') + 1 while True: volumes_info = self.rest_handler.get_volumes(head_id) if not volumes_info or not volumes_info.get('data'): break volume_list.extend(self.parse_volumes(volumes_info)) head_id = volumes_info.get('data')[-1].get('ldevId') + 1 except exception.DelfinException as err: err_msg = "Failed to get volume from hitachi vsp: %s" % \ (six.text_type(err)) LOG.error(err_msg) raise err except Exception as e: err_msg = "Failed to get volume from hitachi vsp: %s" % \ (six.text_type(e)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return volume_list def parse_volumes(self, volumes): try: volume_list = [] volumes = volumes.get('data') for volume in 
volumes: orig_pool_id = volume.get('poolId') compressed = False deduplicated = False if volume.get('dataReductionMode') == \ 'compression_deduplication': deduplicated = True compressed = True if volume.get('dataReductionMode') == 'compression': compressed = True if volume.get('status') == 'NML': status = 'normal' else: status = 'abnormal' vol_type = constants.VolumeType.THICK for voltype in volume.get('attributes'): if voltype == 'HTI': vol_type = constants.VolumeType.THIN total_cap = \ int(volume.get('blockCapacity')) * consts.BLOCK_SIZE used_cap = \ int(volume.get('blockCapacity')) * consts.BLOCK_SIZE # Because there is only subscribed capacity in device,so free # capacity always 0 free_cap = 0 native_volume_id = HitachiVspDriver.to_vsp_lun_id_format( volume.get('ldevId')) if volume.get('label'): name = volume.get('label') else: name = native_volume_id v = { 'name': name, 'storage_id': self.storage_id, 'description': 'Hitachi VSP volume', 'status': status, 'native_volume_id': str(native_volume_id), 'native_storage_pool_id': orig_pool_id, 'type': vol_type, 'total_capacity': total_cap, 'used_capacity': used_cap, 'free_capacity': free_cap, 'compressed': compressed, 'deduplicated': deduplicated, } volume_list.append(v) return volume_list except exception.DelfinException as err: err_msg = "Failed to get volumes metrics from hitachi vsp: %s" % \ (six.text_type(err)) LOG.error(err_msg) raise err except Exception as e: err_msg = "Failed to get volumes metrics from hitachi vsp: %s" % \ (six.text_type(e)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def list_controllers(self, context): try: controller_list = [] controller_info = self.rest_handler.get_controllers() if controller_info is not None: con_entries = controller_info.get('ctls') for control in con_entries: status = constants.ControllerStatus.OFFLINE if control.get('status') == 'Normal': status = constants.ControllerStatus.NORMAL controller_result = { 'name': control.get('location'), 'storage_id': self.storage_id, 'native_controller_id': control.get('location'), 'status': status, 'location': control.get('location') } controller_list.append(controller_result) return controller_list except Exception as err: err_msg = "Failed to get controller attributes from vsp: %s" % \ (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def list_ports(self, context): try: port_list = [] ports = self.rest_handler.get_all_ports() if ports is None: return port_list port_entries = ports.get('data') for port in port_entries: ipv4 = None ipv4_mask = None ipv6 = None wwn = None status = constants.PortHealthStatus.NORMAL conn_status = constants.PortConnectionStatus.CONNECTED if port.get('portType') == 'ISCSI': iscsi_port = self.rest_handler.get_detail_ports( port.get('portId')) ipv4 = iscsi_port.get('ipv4Address') ipv4_mask = iscsi_port.get('ipv4Subnetmask') if iscsi_port.get( 'ipv6LinkLocalAddress', {}).get("status") == 'VAL': ipv6 = iscsi_port.get( 'ipv6LinkLocalAddress', {}).get("address") speed = HitachiVspDriver.AUTO_PORT_SPEED if \ port.get('portSpeed') == 'AUT' else \ int(port.get('portSpeed')[:-1]) * units.Gi if port.get('portType') == 'FIBRE': wwn = port.get('wwn') if wwn: wwn = wwn.upper() port_type = HitachiVspDriver.PORT_TYPE_MAP.get( port.get('portType'), constants.PortType.OTHER) port_result = { 'name': port.get('portId'), 'storage_id': self.storage_id, 'native_port_id': port.get('portId'), 'location': port.get('portId'), 'connection_status': conn_status, 'health_status': status, 'type': port_type, 
'logical_type': '', 'max_speed': speed, 'mac_address': port.get('macAddress'), 'wwn': wwn, 'ipv4': ipv4, 'ipv4_mask': ipv4_mask, 'ipv6': ipv6 } port_list.append(port_result) return port_list except Exception as err: err_msg = "Failed to get ports attributes from vsp: %s" % \ (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def list_disks(self, context): try: disks = self.rest_handler.get_disks() disk_list = [] if disks is not None: disk_entries = disks.get('data') for disk in disk_entries: status = HitachiVspDriver.DISK_STATUS_TYPE.get( disk.get('status'), constants.DiskStatus.NORMAL) if disk.get('driveTypeName'): type_name = 'SSD' if 'SSD' in \ disk.get('driveTypeName').upper()\ else disk.get('driveTypeName') physical_type = \ HitachiVspDriver.DISK_PHYSICAL_TYPE_MAP.get( type_name, constants.DiskPhysicalType.UNKNOWN) else: physical_type = constants.DiskPhysicalType.UNKNOWN logical_type = HitachiVspDriver.DISK_LOGIC_TYPE_MAP.get( disk.get('usageType'), constants.DiskLogicalType.UNKNOWN) disk_result = { 'name': disk.get('driveLocationId'), 'storage_id': self.storage_id, 'native_disk_id': disk.get('driveLocationId'), 'serial_number': disk.get('serialNumber'), 'speed': int(disk.get('driveSpeed', 0)), 'capacity': int(disk.get('totalCapacity', 0)) * units.Gi, 'status': status, 'physical_type': physical_type, 'logical_type': logical_type, 'native_disk_group_id': disk.get('parityGroupId'), 'location': disk.get('driveLocationId') } disk_list.append(disk_result) return disk_list except Exception as err: err_msg = "Failed to get disk attributes from : %s" % \ (six.text_type(err)) raise exception.InvalidResults(err_msg) @staticmethod def parse_queried_alerts(alerts, alert_list, query_para=None): if not alerts: return for alert in alerts: occur_time = int(time.mktime(time.strptime( alert.get('occurenceTime'), HitachiVspDriver.TIME_PATTERN))) * \ HitachiVspDriver.SECONDS_TO_MS if not alert_util.is_alert_in_time_range(query_para, occur_time): continue a = { 'location': alert.get('location'), 'alert_id': alert.get('alertId'), 'sequence_number': alert.get('alertIndex'), 'description': alert.get('errorDetail'), 'alert_name': alert.get('errorSection'), 'resource_type': constants.DEFAULT_RESOURCE_TYPE, 'occur_time': occur_time, 'category': constants.Category.FAULT, 'type': constants.EventType.EQUIPMENT_ALARM, 'severity': HitachiVspDriver.ALERT_LEVEL_MAP.get( alert.get('errorLevel'), constants.Severity.INFORMATIONAL ) } alert_list.append(a) def list_alerts(self, context, query_para=None): alert_list = [] if self.rest_handler.device_model in consts.SUPPORTED_VSP_SERIES: alerts_info_ctl1 = self.rest_handler.get_alerts( 'type=CTL1', HitachiVspDriver.ALERT_START, HitachiVspDriver.CTL_ALERT_COUNT) alerts_info_ctl2 = self.rest_handler.get_alerts( 'type=CTL2', HitachiVspDriver.ALERT_START, HitachiVspDriver.CTL_ALERT_COUNT) alerts_info_dkc = self.rest_handler.get_alerts( 'type=DKC', HitachiVspDriver.ALERT_START, HitachiVspDriver.DKC_ALERT_COUNT) HitachiVspDriver.parse_queried_alerts(alerts_info_ctl1, alert_list, query_para) HitachiVspDriver.parse_queried_alerts(alerts_info_ctl2, alert_list, query_para) HitachiVspDriver.parse_queried_alerts(alerts_info_dkc, alert_list, query_para) else: err_msg = "list_alerts is not supported in model %s" % \ self.rest_handler.device_model LOG.error(err_msg) raise NotImplementedError(err_msg) return alert_list def add_trap_config(self, context, trap_config): pass def remove_trap_config(self, context, trap_config): pass @staticmethod def 
parse_alert(context, alert): try: alert_model = dict() alert_model['alert_id'] = alert.get(HitachiVspDriver.REFCODE_OID) alert_model['alert_name'] = alert.get(HitachiVspDriver.DESC_OID) severity = HitachiVspDriver.TRAP_ALERT_LEVEL_MAP.get( alert.get(HitachiVspDriver.OID_SEVERITY), constants.Severity.INFORMATIONAL ) alert_model['severity'] = severity alert_model['category'] = constants.Category.FAULT alert_model['type'] = constants.EventType.EQUIPMENT_ALARM aler_time = '%s%s' % (alert.get(HitachiVspDriver.TRAP_DATE_OID), alert.get(HitachiVspDriver.TRAP_TIME_OID)) pattern = '%Y/%m/%d%H:%M:%S' occur_time = time.strptime(aler_time, pattern) alert_model['occur_time'] = int(time.mktime(occur_time) * HitachiVspDriver.SECONDS_TO_MS) alert_model['description'] = alert.get(HitachiVspDriver.DESC_OID) alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE alert_model['location'] = alert.get(HitachiVspDriver. TRAP_NICKNAME_OID) alert_model['match_key'] = hashlib.md5( alert.get(HitachiVspDriver.DESC_OID).encode()).hexdigest() return alert_model except Exception as e: LOG.error(e) msg = ("Failed to build alert model as some attributes missing in" " alert message:%s") % (six.text_type(e)) raise exception.InvalidResults(msg) def clear_alert(self, context, alert): pass @staticmethod def handle_group_with_port(group_info): group_list = {} if not group_info: return group_list group_entries = group_info.get('data') for group in group_entries: if group_list.get(group.get('portId')): group_list[group.get('portId')].append( group.get('hostGroupNumber')) else: group_list[group.get('portId')] = [] group_list[group.get('portId')].append( group.get('hostGroupNumber')) return group_list @staticmethod def get_host_info(data, storage_id, host_list, type, os_type): if data: host_entries = data.get('data') if not host_entries: return True for host in host_entries: if type == 'iscsi': host_id = host.get('hostIscsiId') host_name = host.get('iscsiNickname') if \ host.get('iscsiNickname') != '-' \ else host.get('iscsiName') else: host_id = host.get('hostWwnId') host_name = host.get('wwnNickname') if \ host.get('wwnNickname') != '-' \ else host.get('hostWwn') host_result = { "name": host_name, "storage_id": storage_id, "native_storage_host_id": host_id.replace(",", "_"), "os_type": os_type, "status": constants.HostStatus.NORMAL } host_list.append(host_result) return True def list_storage_hosts(self, context): try: host_groups = self.rest_handler.get_all_host_groups() host_list = [] if not host_groups: return host_list group_with_port = HitachiVspDriver.handle_group_with_port( host_groups) for port in group_with_port: kwargs = { 'method': 'host', 'port': port, 'result': host_list } self.handle_san_info(**kwargs) return host_list except Exception as e: LOG.error("Failed to get host from vsp") raise e @staticmethod def get_initiator_from_host(data, storage_id, initiator_list, type): if data: host_entries = data.get('data') if not host_entries: return True for host in host_entries: if type == 'iscsi': initiator_id = host.get('hostIscsiId') init_type = constants.InitiatorType.ISCSI init_name = host.get('iscsiName') else: initiator_id = host.get('hostWwnId') init_type = constants.InitiatorType.FC init_name = host.get('hostWwn') for initiator in initiator_list: if initiator.get('wwn') == init_name: continue init_result = { "name": init_name, "storage_id": storage_id, "native_storage_host_initiator_id": init_name, "wwn": init_name, "status": constants.InitiatorStatus.ONLINE, "type": init_type, "alias": host.get('portId'), 
"native_storage_host_id": initiator_id.replace(",", "_") } initiator_list.append(init_result) return True def list_storage_host_initiators(self, context): try: initiator_list = [] host_groups = self.rest_handler.get_all_host_groups() if not host_groups: return initiator_list group_with_port = HitachiVspDriver.handle_group_with_port( host_groups) for port in group_with_port: kwargs = { 'method': 'initator', 'port': port, 'result': initiator_list } self.handle_san_info(**kwargs) return initiator_list except Exception as e: LOG.error("Failed to get initiators from vsp") raise e @staticmethod def get_host_ids(data, target, host_ids, host_grp_relation_list, storage_id, group_id): if data: host_entries = data.get('data') if not host_entries: return True for host in host_entries: if host.get(target): host_ids.append(host.get(target).replace(",", "_")) relation = { 'storage_id': storage_id, 'native_storage_host_group_id': group_id, 'native_storage_host_id': host.get(target).replace(",", "_") } host_grp_relation_list.append(relation) def list_storage_host_groups(self, context): try: host_groups = self.rest_handler.get_all_host_groups() host_group_list = [] host_grp_relation_list = [] if not host_groups: return host_group_list group_with_port = HitachiVspDriver.handle_group_with_port( host_groups) for port in group_with_port: kwargs = { 'method': 'group', 'port': port, 'result': host_grp_relation_list, 'group_list': host_group_list } self.handle_san_info(**kwargs) result = { 'storage_host_groups': host_group_list, 'storage_host_grp_host_rels': host_grp_relation_list } return result except Exception: LOG.error("Failed to get host_groups from vsp") raise def handle_lun_path(self, **kwargs): view_list = [] views = self.rest_handler.get_lun_path( kwargs.get('port'), kwargs.get('group')) if not views: return None view_entries = views.get('data') if not view_entries: return None for view in view_entries: group_id = '%s_%s' % (view.get('portId'), view.get('hostGroupNumber')) view_result = { "name": view.get('lunId'), "native_storage_host_group_id": group_id, "storage_id": self.storage_id, "native_volume_id": HitachiVspDriver.to_vsp_lun_id_format( view.get('ldevId')), "native_masking_view_id": view.get('lunId').replace(",", "_"), } kwargs.get('result').append(view_result) return view_list def list_masking_views(self, context): try: view_list = [] host_groups = self.rest_handler.get_all_host_groups() if not host_groups: return view_list group_data = host_groups.get('data') for group in group_data: kwargs = { 'group': group.get('hostGroupNumber'), 'port': group.get('portId'), 'result': view_list } self.handle_lun_path(**kwargs) return view_list except Exception as e: LOG.error("Failed to get views from vsp") raise e def handle_san_info(self, **kwargs): groups = self.rest_handler.get_specific_host_group( kwargs.get('port')) group_data = groups.get('data') for specific_group in group_data: iscsis = None wwns = None if specific_group.get('iscsiName'): iscsis = self.rest_handler.get_iscsi_name( specific_group.get('portId'), specific_group.get('hostGroupNumber')) else: wwns = self.rest_handler.get_host_wwn( specific_group.get('portId'), specific_group.get('hostGroupNumber')) if kwargs.get('method') == 'host': os_type = HitachiVspDriver.OS_TYPE_MAP.get( specific_group.get('hostMode'), constants.HostOSTypes.UNKNOWN) if specific_group.get('iscsiName'): HitachiVspDriver.get_host_info( iscsis, self.storage_id, kwargs.get('result'), 'iscsi', os_type) else: HitachiVspDriver.get_host_info( wwns, self.storage_id, 
    def handle_san_info(self, **kwargs):
        groups = self.rest_handler.get_specific_host_group(
            kwargs.get('port'))
        group_data = groups.get('data')
        for specific_group in group_data:
            iscsis = None
            wwns = None
            if specific_group.get('iscsiName'):
                iscsis = self.rest_handler.get_iscsi_name(
                    specific_group.get('portId'),
                    specific_group.get('hostGroupNumber'))
            else:
                wwns = self.rest_handler.get_host_wwn(
                    specific_group.get('portId'),
                    specific_group.get('hostGroupNumber'))
            if kwargs.get('method') == 'host':
                os_type = HitachiVspDriver.OS_TYPE_MAP.get(
                    specific_group.get('hostMode'),
                    constants.HostOSTypes.UNKNOWN)
                if specific_group.get('iscsiName'):
                    HitachiVspDriver.get_host_info(
                        iscsis, self.storage_id, kwargs.get('result'),
                        'iscsi', os_type)
                else:
                    HitachiVspDriver.get_host_info(
                        wwns, self.storage_id,
                        kwargs.get('result'), 'fc', os_type)
            elif kwargs.get('method') == 'group':
                host_ids = []
                group_id = specific_group.get('hostGroupId').replace(",", "_")
                if specific_group.get('iscsiName'):
                    HitachiVspDriver.get_host_ids(
                        iscsis, 'hostIscsiId', host_ids,
                        kwargs.get('result'), self.storage_id, group_id)
                else:
                    HitachiVspDriver.get_host_ids(
                        wwns, 'hostWwnId', host_ids,
                        kwargs.get('result'), self.storage_id, group_id)
                group_result = {
                    "name": specific_group.get('hostGroupName'),
                    "storage_id": self.storage_id,
                    "native_storage_host_group_id": group_id,
                    "storage_hosts": ','.join(host_ids)
                }
                kwargs.get('group_list').append(group_result)
            else:
                if specific_group.get('iscsiName'):
                    HitachiVspDriver.get_initiator_from_host(
                        iscsis, self.storage_id, kwargs.get('result'),
                        'iscsi')
                else:
                    HitachiVspDriver.get_initiator_from_host(
                        wwns, self.storage_id, kwargs.get('result'), 'fc')


================================================
FILE: delfin/drivers/hpe/__init__.py
================================================


================================================
FILE: delfin/drivers/hpe/hpe_3par/__init__.py
================================================


================================================
FILE: delfin/drivers/hpe/hpe_3par/alert_handler.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
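# A minimal sketch (OID values here are illustrative assumptions, not a
# captured trap) of the payload shape that AlertHandler.parse_alert below
# consumes, keyed by the OID constants defined in the class:
#
#     trap = {
#         AlertHandler.OID_MESSAGECODE: '983044',
#         AlertHandler.OID_SEVERITY: '2',
#         AlertHandler.OID_STATE: '1',
#         AlertHandler.OID_ID: '12345',
#         AlertHandler.OID_TIMEOCCURRED: '2021-01-01 10:00:00',
#         AlertHandler.OID_DETAILS: 'CPG growth warning',
#         AlertHandler.OID_COMPONENT: 'cpg:cpg0',
#     }
#     AlertHandler.parse_alert(None, trap)
#     # -> alert_id '0x00f0004', severity MAJOR, category FAULT,
#     #    occur_time in epoch milliseconds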
import time

import six
from oslo_log import log as logging

from delfin import exception
from delfin.common import constants
from delfin.drivers.hpe.hpe_3par import consts
from delfin.i18n import _

LOG = logging.getLogger(__name__)


class AlertHandler(object):
    """Alert handling functions for Hpe3parStor driver"""

    OID_MESSAGECODE = '1.3.6.1.4.1.12925.1.7.1.8.1'
    OID_SEVERITY = '1.3.6.1.4.1.12925.1.7.1.2.1'
    OID_STATE = '1.3.6.1.4.1.12925.1.7.1.9.1'
    OID_ID = '1.3.6.1.4.1.12925.1.7.1.7.1'
    OID_TIMEOCCURRED = '1.3.6.1.4.1.12925.1.7.1.3.1'
    OID_DETAILS = '1.3.6.1.4.1.12925.1.7.1.6.1'
    OID_COMPONENT = '1.3.6.1.4.1.12925.1.7.1.5.1'

    # Translation of trap severity to alert model severity
    SEVERITY_MAP = {"1": constants.Severity.CRITICAL,
                    "2": constants.Severity.MAJOR,
                    "3": constants.Severity.MINOR,
                    "4": constants.Severity.WARNING,
                    "0": constants.Severity.FATAL,
                    "5": constants.Severity.INFORMATIONAL,
                    "6": constants.Severity.NOT_SPECIFIED}

    # Translation of trap alert category to alert model category
    CATEGORY_MAP = {"0": constants.Category.NOT_SPECIFIED,
                    "1": constants.Category.FAULT,
                    "2": constants.Category.RECOVERY,
                    "3": constants.Category.RECOVERY,
                    "4": constants.Category.RECOVERY,
                    "5": constants.Category.RECOVERY}

    ALERT_KEY_MAP = {"Id": "sequence_number",
                     "State": "category",
                     "MessageCode": "message_code",
                     "Time": "occur_time",
                     "Severity": "severity",
                     "Type": "alert_name",
                     "Message": "description",
                     "Component": "location"
                     }

    ALERT_LEVEL_MAP = {"Critical": constants.Severity.CRITICAL,
                       "Major": constants.Severity.MAJOR,
                       "Minor": constants.Severity.MINOR,
                       "Degraded": constants.Severity.WARNING,
                       "Fatal": constants.Severity.FATAL,
                       "Informational": constants.Severity.INFORMATIONAL,
                       "Debug": constants.Severity.NOT_SPECIFIED
                       }

    # Attributes expected in alert info to proceed with model filling
    _mandatory_alert_attributes = (
        OID_MESSAGECODE,
        OID_SEVERITY,
        OID_STATE,
        OID_ID,
        OID_TIMEOCCURRED,
        OID_DETAILS,
        OID_COMPONENT
    )

    # Convert received time to epoch format
    TIME_PATTERN = '%Y-%m-%d %H:%M:%S'

    def __init__(self, rest_handler=None, ssh_handler=None):
        self.rest_handler = rest_handler
        self.ssh_handler = ssh_handler

    @staticmethod
    def parse_alert(context, alert):
        """Parse alert data received from alert manager and fill the
        alert model."""
        # Check for mandatory alert attributes
        for attr in AlertHandler._mandatory_alert_attributes:
            if not alert.get(attr):
                msg = "Mandatory information %s missing in alert message. " \
                      % attr
                raise exception.InvalidInput(msg)

        try:
            alert_model = dict()
            # These information are sourced from device registration info
            alert_model['alert_id'] = ("0x%07x" % int(
                alert.get(AlertHandler.OID_MESSAGECODE)))
            alert_model['alert_name'] = AlertHandler.get_alert_type(alert.get(
                AlertHandler.OID_MESSAGECODE))
            alert_model['severity'] = AlertHandler.SEVERITY_MAP.get(
                alert.get(AlertHandler.OID_SEVERITY),
                constants.Severity.NOT_SPECIFIED)
            alert_model['category'] = AlertHandler.CATEGORY_MAP.get(
                alert.get(AlertHandler.OID_STATE),
                constants.Category.NOT_SPECIFIED)
            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
            alert_model['sequence_number'] = alert.get(AlertHandler.OID_ID)
            alert_model['occur_time'] = AlertHandler.get_time_stamp(
                alert.get(AlertHandler.OID_TIMEOCCURRED))
            alert_model['description'] = alert.get(AlertHandler.OID_DETAILS)
            alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE
            alert_model['location'] = alert.get(AlertHandler.OID_COMPONENT)

            if alert.get(AlertHandler.OID_STATE) == '5':
                alert_model['clear_category'] = constants.ClearType.AUTOMATIC
            return alert_model
        except Exception as e:
            LOG.error(e)
            msg = (_("Failed to build alert model as some attributes missing "
                     "in alert message."))
            raise exception.InvalidResults(msg)

    def add_trap_config(self, context, storage_id, trap_config):
        """Config the trap receiver in storage system."""
        # Currently not implemented
        pass

    def remove_trap_config(self, context, storage_id, trap_config):
        """Remove trap receiver configuration from storage system."""
        # Currently not implemented
        pass

    def clear_alert(self, context, alert):
        """Clear alert from storage system.

        Remove command: removealert
        """
        try:
            if alert:
                self.ssh_handler.remove_alerts(alert)
                LOG.info("Clear alert %s successfully." % alert)
        except exception.DelfinException as e:
            err_msg = "Remove alert %s failed: %s" % (alert, e.msg)
            LOG.error(err_msg)
            raise e
        except Exception as e:
            err_msg = "Remove alert %s failed: %s" % (alert, six.text_type(e))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    @staticmethod
    def judge_alert_time(map, query_para):
        if len(map) <= 1:
            return False
        if query_para is None:
            return True
        occur_time = AlertHandler.get_time_stamp(map.get('occur_time'))
        if query_para.get('begin_time') and query_para.get('end_time'):
            if occur_time >= int(query_para.get('begin_time')) and \
                    occur_time <= int(query_para.get('end_time')):
                return True
        elif query_para.get('begin_time'):
            if occur_time >= int(query_para.get('begin_time')):
                return True
        elif query_para.get('end_time'):
            if occur_time <= int(query_para.get('end_time')):
                return True
        return False
    def handle_alerts(self, alertlist, query_para):
        map = {}
        alert_list = []
        for alertinfo in alertlist:
            strline = alertinfo
            if strline is not None and strline != '':
                strinfo = strline.split(': ', 1)
                strinfo[0] = strinfo[0].replace(" ", "")
                key = self.ALERT_KEY_MAP.get(strinfo[0]) or ''
                value = strinfo[1] if self.ALERT_KEY_MAP.get(
                    strinfo[0]) else ''
                map[key] = value
            elif AlertHandler.judge_alert_time(map, query_para):
                severity = self.ALERT_LEVEL_MAP.get(map.get('severity'))
                category = 'Fault' if map.get('category') == 'New' else ''
                occur_time = AlertHandler.get_time_stamp(
                    map.get('occur_time'))
                alert_model = {
                    'alert_id': map.get('message_code'),
                    'alert_name': map.get('alert_name'),
                    'severity': severity,
                    'category': category,
                    'type': constants.EventType.EQUIPMENT_ALARM,
                    'sequence_number': map.get('sequence_number'),
                    'occur_time': occur_time,
                    'description': map.get('description'),
                    'resource_type': constants.DEFAULT_RESOURCE_TYPE,
                    'location': map.get('location')
                }
                alert_list.append(alert_model)
                map = {}
        return alert_list

    def list_alerts(self, context, query_para):
        try:
            # Get list of Hpe3parStor alerts
            try:
                reslist = self.ssh_handler.get_all_alerts()
            except Exception as e:
                err_msg = "Failed to ssh Hpe3parStor: %s" % \
                          (six.text_type(e))
                LOG.error(err_msg)
                raise exception.SSHException(err_msg)
            alertlist = reslist.split('\n')
            return self.handle_alerts(alertlist, query_para)
        except exception.DelfinException as e:
            err_msg = "Get alerts failed: %s" % (e.msg)
            LOG.error(err_msg)
            raise e
        except Exception as e:
            err_msg = "Get alert failed: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    @staticmethod
    def get_time_stamp(time_str):
        """Convert a time string to an epoch timestamp in milliseconds."""
        time_stamp = ''
        try:
            if time_str:
                if len(time_str.split()) == 3:
                    time_str = time_str.rsplit(' ', 1)[0]
                # Convert to time array first
                time_array = time.strptime(time_str,
                                           AlertHandler.TIME_PATTERN)
                # Convert to timestamp in milliseconds
                time_stamp = int(time.mktime(time_array) * 1000)
        except Exception as e:
            LOG.error(e)
        return time_stamp

    @staticmethod
    def get_alert_type(message_code):
        """Get alert type

        :param str message_code: alert's message_code.
        :return: returns alert's type
        """
        re = ''
        try:
            if message_code is not None:
                message_key = ("0x%07x" % int(message_code))
                re = consts.HPE3PAR_ALERT_CODE.get(message_key)
        except Exception as e:
            LOG.error(e)
        return re


================================================
FILE: delfin/drivers/hpe/hpe_3par/component_handler.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
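# A minimal sketch (the numbers are hypothetical) of the capacity arithmetic
# used in ComponentHandler.get_storage below: the 3PAR REST API reports MiB,
# delfin models bytes, so each field is scaled by units.Mi (2**20), and
# usable capacity is derived from free + allocated:
#
#     free_cap = 512000 * units.Mi        # freeCapacityMiB
#     used_cap = 1536000 * units.Mi       # allocatedCapacityMiB
#     total_cap = free_cap + used_cap     # usable capacity in bytes
#     raw_cap = 4096000 * units.Mi        # totalCapacityMiB (raw)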
import copy import datetime import re import time import six from oslo_log import log from oslo_utils import units from delfin import exception from delfin.common import constants from delfin.drivers.hpe.hpe_3par import consts LOG = log.getLogger(__name__) class ComponentHandler(): COMPONENT_HEALTH = 'The following components are healthy' SYSTEM_HEALTH = 'System is healthy' HPE3PAR_VERSION = 'Superclass' HPE3PAR_VENDOR = 'HPE' STATUS_MAP = {1: constants.StoragePoolStatus.NORMAL, 2: constants.StoragePoolStatus.ABNORMAL, 3: constants.StoragePoolStatus.ABNORMAL, 99: constants.StoragePoolStatus.OFFLINE} VOL_TYPE_MAP = {1: constants.VolumeType.THICK, 2: constants.VolumeType.THIN, 3: constants.VolumeType.THIN, 4: constants.VolumeType.THICK, 5: constants.VolumeType.THICK, 6: constants.VolumeType.THIN, 7: constants.VolumeType.THICK} def __init__(self, rest_handler=None, ssh_handler=None): self.rest_handler = rest_handler self.ssh_handler = ssh_handler def set_storage_id(self, storage_id): self.storage_id = storage_id def get_storage(self, context): storage = self.rest_handler.get_storage() status = constants.StorageStatus.NORMAL if storage: try: # Check the hardware and software health # status of the storage system re_str = self.ssh_handler.get_health_state() if 'degraded' in re_str.lower() or 'failed' in re_str.lower(): status = constants.StorageStatus.ABNORMAL except Exception: status = constants.StorageStatus.ABNORMAL LOG.error('SSH check health Failed!') free_cap = int(storage.get('freeCapacityMiB')) * units.Mi used_cap = int(storage.get('allocatedCapacityMiB')) * units.Mi total_cap = free_cap + used_cap raw_cap = int(storage.get('totalCapacityMiB')) * units.Mi result = { 'name': storage.get('name'), 'vendor': ComponentHandler.HPE3PAR_VENDOR, 'model': storage.get('model'), 'status': status, 'serial_number': storage.get('serialNumber'), 'firmware_version': storage.get('systemVersion'), 'location': storage.get('location'), 'total_capacity': total_cap, 'raw_capacity': raw_cap, 'used_capacity': used_cap, 'free_capacity': free_cap } else: # If no data is returned, it indicates that there # may be a problem with the network or the device. 
            # Default return OFFLINE
            result = {
                'status': constants.StorageStatus.OFFLINE
            }
        return result

    def list_storage_pools(self, context):
        try:
            # Get list of Hpe3parStor pool details
            pools = self.rest_handler.get_all_pools()
            pool_list = []
            if pools is not None:
                members = pools.get('members')
                for pool in (members or []):
                    # Get pool status 1=normal 2,3=abnormal 99=offline
                    status = self.STATUS_MAP.get(pool.get('state'))
                    # Get pool storage_type default block
                    pool_type = constants.StorageType.BLOCK
                    usr_used = int(pool['UsrUsage']['usedMiB']) * units.Mi
                    sa_used = int(pool['SAUsage']['usedMiB']) * units.Mi
                    sd_used = int(pool['SDUsage']['usedMiB']) * units.Mi
                    usr_total = int(pool['UsrUsage']['totalMiB']) * units.Mi
                    sa_total = int(pool['SAUsage']['totalMiB']) * units.Mi
                    sd_total = int(pool['SDUsage']['totalMiB']) * units.Mi
                    total_cap = usr_total + sa_total + sd_total
                    used_cap = usr_used + sa_used + sd_used
                    free_cap = total_cap - used_cap
                    usr_subcap = int(
                        pool['UsrUsage']['rawTotalMiB']) * units.Mi
                    sa_subcap = int(pool['SAUsage']['rawTotalMiB']) * units.Mi
                    sd_subcap = int(pool['SDUsage']['rawTotalMiB']) * units.Mi
                    subscribed_cap = usr_subcap + sa_subcap + sd_subcap
                    p = {
                        'name': pool.get('name'),
                        'storage_id': self.storage_id,
                        'native_storage_pool_id': str(pool.get('id')),
                        'description': 'Hpe 3par CPG:%s' % pool.get('name'),
                        'status': status,
                        'storage_type': pool_type,
                        'total_capacity': total_cap,
                        'subscribed_capacity': subscribed_cap,
                        'used_capacity': used_cap,
                        'free_capacity': free_cap
                    }
                    pool_list.append(p)
            return pool_list
        except exception.DelfinException as e:
            err_msg = "Failed to get pool metrics from Hpe3parStor: %s" % \
                      (e.msg)
            LOG.error(err_msg)
            raise e
        except Exception as e:
            err_msg = "Failed to get pool metrics from Hpe3parStor: %s" % \
                      (six.text_type(e))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def handler_volume(self, volumes, pool_ids):
        volume_list = []
        if volumes is None:
            return
        else:
            members = volumes.get('members')
            for volume in (members or []):
                status = self.STATUS_MAP.get(volume.get('state'))
                orig_pool_name = volume.get('userCPG', '')
                compressed = True
                deduplicated = True
                if volume.get('compressionState') and volume.get(
                        'compressionState') != 1:
                    compressed = False
                if volume.get('deduplicationState') and volume.get(
                        'deduplicationState') != 1:
                    deduplicated = False
                vol_type = self.VOL_TYPE_MAP.get(
                    volume.get('provisioningType'))
                # Virtual size of volume in MiB (1024^2 bytes).
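                # For example (hypothetical values): a 100 GiB thin volume
                # with userSpace.usedMiB == 20480 yields
                # total_cap = 102400 * units.Mi, used_cap = 20480 * units.Mi
                # and free_cap = total_cap - used_cap; free_cap is virtual
                # headroom on the volume, not physical free space in the CPG.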
usr_used = int( volume['userSpace']['usedMiB']) * units.Mi total_cap = int(volume['sizeMiB']) * units.Mi used_cap = usr_used free_cap = total_cap - used_cap v = { 'name': volume.get('name'), 'storage_id': self.storage_id, 'description': volume.get('comment'), 'status': status, 'native_volume_id': str(volume.get('id')), 'native_storage_pool_id': pool_ids.get(orig_pool_name, ''), 'wwn': volume.get('wwn'), 'type': vol_type, 'total_capacity': total_cap, 'used_capacity': used_cap, 'free_capacity': free_cap, 'compressed': compressed, 'deduplicated': deduplicated } volume_list.append(v) return volume_list def list_volumes(self, context): try: volumes = self.rest_handler.get_all_volumes() pools = self.rest_handler.get_all_pools() pool_ids = {} if pools is not None: members = pools.get('members') for pool in (members or []): pool_ids[pool.get('name')] = pool.get('id') return self.handler_volume(volumes, pool_ids) except exception.DelfinException as e: err_msg = "Failed to get list volumes from Hpe3parStor: %s" % \ (e.msg) LOG.error(err_msg) raise e except Exception as e: err_msg = "Failed to get list volumes from Hpe3parStor: %s" % \ (six.text_type(e)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def list_controllers(self, storage_id): controllers = self.ssh_handler.get_controllers() controller_list = [] if controllers: node_cpu_map = self.ssh_handler.get_controllers_cpu() node_version_map = self.ssh_handler.get_controllers_version() for controller in controllers: node_id = controller.get('node') memory_size = int(controller.get('controlmem(mb)', '0')) * units.Mi + int( controller.get('datamem(mb)', '0')) * units.Mi cpu_info = '' cpu_count = None if node_cpu_map and node_cpu_map.get(node_id): cpu_count = 0 cpu_info_map = node_cpu_map.get(node_id) cpu_info_keys = list(cpu_info_map.keys()) for cpu_key in cpu_info_keys: if cpu_info: cpu_info = '%s%s' % (cpu_info, ',') cpu_info = '%s%s * %s MHz' % ( cpu_info, cpu_info_map.get(cpu_key), cpu_key) cpu_count += cpu_info_map.get(cpu_key) soft_version = None if node_version_map: soft_version = node_version_map.get(node_id, '') controller_model = { 'name': controller.get('name'), 'storage_id': storage_id, 'native_controller_id': node_id, 'status': consts.CONTROLLER_STATUS_MAP.get( controller.get('state', '').upper(), constants.ControllerStatus.OFFLINE), 'location': None, 'soft_version': soft_version, 'cpu_info': cpu_info, 'cpu_count': cpu_count, 'memory_size': str(memory_size) } controller_list.append(controller_model) return controller_list def list_disks(self, storage_id): disks = self.ssh_handler.get_disks() disk_list = [] if disks: disks_inventory_map = self.ssh_handler.get_disks_inventory() for disk in disks: disk_id = disk.get('id') status = consts.DISK_STATUS_MAP.get( disk.get('state', '').upper(), constants.DiskStatus.ABNORMAL) total = 0 if disk.get('total'): total = float(disk.get("total")) elif disk.get('size_mb'): total = float(disk.get("size_mb")) capacity = int(total * units.Mi) serial_number = None manufacturer = None model = None firmware = None if disks_inventory_map: inventory_map = disks_inventory_map.get(disk_id) if inventory_map: serial_number = inventory_map.get('disk_serial') manufacturer = inventory_map.get('disk_mfr') model = inventory_map.get('disk_model') firmware = inventory_map.get('disk_fw_rev') speed = None if str(disk.get('rpm')).isdigit(): speed = int(disk.get('rpm')) * units.k disk_model = { 'name': disk.get('cagepos'), 'storage_id': storage_id, 'native_disk_id': disk_id, 'serial_number': serial_number, 
'manufacturer': manufacturer, 'model': model, 'firmware': firmware, 'speed': speed, 'capacity': capacity, 'status': status, 'physical_type': consts.DISK_PHYSICAL_TYPE_MAP.get( disk.get('type').upper(), constants.DiskPhysicalType.UNKNOWN), 'logical_type': None, 'health_score': None, 'native_disk_group_id': None, 'location': disk.get('cagepos') } disk_list.append(disk_model) return disk_list def list_ports(self, storage_id): ports = self.ssh_handler.get_ports() port_list = [] if ports: ports_inventory_map = self.ssh_handler.get_ports_inventory() ports_config_map = self.ssh_handler.get_ports_config() ports_iscsi_map = self.ssh_handler.get_ports_iscsi() ports_rcip_map = self.ssh_handler.get_ports_rcip() ports_connected_map = self.ssh_handler.get_ports_connected() ports_fcoe_map = self.ssh_handler.get_ports_fcoe() port_fs_map = self.ssh_handler.get_ports_fs() for port in ports: port_id = port.get('n:s:p') port_type = '' if ports_inventory_map: port_type = ports_inventory_map.get(port_id, '') max_speed = '' if ports_config_map: max_speed = ports_config_map.get(port_id, '') ip_addr = None ip_mask = None ipv4 = None ipv4_mask = None ipv6 = None ipv6_mask = None rate = '' if ports_connected_map: rate = ports_connected_map.get(port_id, '') if not ip_addr and ports_iscsi_map: iscsi_map = ports_iscsi_map.get(port_id) if iscsi_map: ip_addr = iscsi_map.get('ipaddr') ip_mask = iscsi_map.get('netmask/prefixlen') rate = iscsi_map.get('rate') if not ip_addr and ports_rcip_map: rcip_map = ports_rcip_map.get(port_id) if rcip_map: ip_addr = rcip_map.get('ipaddr') ip_mask = rcip_map.get('netmask') rate = rcip_map.get('rate') if not ip_addr and port_fs_map: fs_map = port_fs_map.get(port_id) if fs_map: ip_addr = fs_map.get('ipaddr') ip_mask = fs_map.get('netmask') rate = fs_map.get('rate') if not rate and ports_fcoe_map: fcoe_map = ports_fcoe_map.get(port_id) if fcoe_map: rate = fcoe_map.get('rate') if ip_addr and ip_addr != '-': pattern = re.compile(consts.IPV4_PATTERN) search_obj = pattern.search(ip_addr) if search_obj: ipv4 = ip_addr ipv4_mask = ip_mask else: ipv6 = ip_addr ipv6_mask = ip_mask wwn = None mac = None if port_type.upper() == 'ETH': mac = port.get('port_wwn/hw_addr') else: wwn = port.get('port_wwn/hw_addr') port_model = { 'name': port_id, 'storage_id': storage_id, 'native_port_id': port_id, 'location': port_id, 'connection_status': consts.PORT_CONNECTION_STATUS_MAP.get( port.get('state', '').upper(), constants.PortConnectionStatus.UNKNOWN), 'health_status': constants.PortHealthStatus.NORMAL, 'type': consts.PORT_TYPE_MAP.get(port_type.upper(), constants.PortType.OTHER), 'logical_type': None, 'speed': self.parse_speed(rate), 'max_speed': self.parse_speed(max_speed), 'native_parent_id': None, 'wwn': wwn, 'mac_address': mac, 'ipv4': ipv4, 'ipv4_mask': ipv4_mask, 'ipv6': ipv6, 'ipv6_mask': ipv6_mask, } port_list.append(port_model) return port_list def parse_speed(self, speed_value): speed = 0 try: if speed_value == '' or speed_value == 'n/a': return None speeds = re.findall("\\d+", speed_value) if speeds: speed = int(speeds[0]) if 'Gbps' in speed_value: speed = speed * units.G elif 'Mbps' in speed_value: speed = speed * units.M elif 'Kbps' in speed_value: speed = speed * units.k except Exception as err: err_msg = "analyse speed error: %s" % (six.text_type(err)) LOG.error(err_msg) return speed def collect_perf_metrics(self, storage_id, resource_metrics, start_time, end_time): metrics = [] try: # storage-pool metrics if resource_metrics.get(constants.ResourceType.STORAGE_POOL): pool_metrics = 
self.get_pool_metrics( storage_id, resource_metrics.get(constants.ResourceType.STORAGE_POOL), start_time, end_time) metrics.extend(pool_metrics) # volume metrics if resource_metrics.get(constants.ResourceType.VOLUME): volume_metrics = self.get_volume_metrics( storage_id, resource_metrics.get(constants.ResourceType.VOLUME), start_time, end_time) metrics.extend(volume_metrics) # port metrics if resource_metrics.get(constants.ResourceType.PORT): port_metrics = self.get_port_metrics( storage_id, resource_metrics.get(constants.ResourceType.PORT), start_time, end_time) metrics.extend(port_metrics) # disk metrics if resource_metrics.get(constants.ResourceType.DISK): disk_metrics = self.get_disk_metrics( storage_id, resource_metrics.get(constants.ResourceType.DISK), start_time, end_time) metrics.extend(disk_metrics) except exception.DelfinException as err: err_msg = "Failed to collect metrics from Hpe3parStor: %s" % \ (six.text_type(err)) LOG.error(err_msg) raise err except Exception as err: err_msg = "Failed to collect metrics from Hpe3parStor: %s" % \ (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return metrics def get_pool_metrics(self, storage_id, metric_list, start_time, end_time): metrics = [] obj_metrics = {} pool_maps = {} pools = self.rest_handler.get_all_pools() if pools: pool_members = pools.get('members') for pool in pool_members: pool_maps[pool.get('name')] = str(pool.get('id')) obj_metrics = self.rest_format_metrics_data( start_time, end_time, self.rest_handler.get_pool_metrics, constants.ResourceType.STORAGE_POOL) if obj_metrics: for obj_name in obj_metrics.keys(): if pool_maps.get(obj_name): labels = { 'storage_id': storage_id, 'resource_type': constants.ResourceType.STORAGE_POOL, 'resource_id': pool_maps.get(obj_name), 'type': 'RAW', 'unit': '' } metric_model_list = self._get_metric_model(metric_list, labels, obj_metrics.get( obj_name), consts.POOL_CAP) if metric_model_list: metrics.extend(metric_model_list) return metrics def _get_metric_model(self, metric_list, labels, metric_values, obj_cap): metric_model_list = [] for metric_name in (metric_list or []): values = {} obj_labels = copy.deepcopy(labels) obj_labels['unit'] = obj_cap.get(metric_name).get('unit') for metric_value in metric_values: if metric_value.get(metric_name) is not None: collect_timestamp = self.convert_to_system_time( metric_value.get('collect_timestamp')) values[collect_timestamp] = metric_value.get( metric_name) if values: metric_model = constants.metric_struct(name=metric_name, labels=obj_labels, values=values) metric_model_list.append(metric_model) return metric_model_list def get_port_metrics(self, storage_id, metric_list, start_time, end_time): metrics = [] obj_metrics = self.ssh_format_metrics_data( start_time, end_time, self.ssh_handler.get_port_metrics, constants.ResourceType.PORT) if obj_metrics: for obj_id in obj_metrics.keys(): labels = { 'storage_id': storage_id, 'resource_type': constants.ResourceType.PORT, 'resource_id': obj_id, 'type': 'RAW', 'unit': '' } metric_model_list = self._get_metric_model(metric_list, labels, obj_metrics.get( obj_id), consts.PORT_CAP) if metric_model_list: metrics.extend(metric_model_list) return metrics def get_disk_metrics(self, storage_id, metric_list, start_time, end_time): metrics = [] obj_metrics = self.ssh_format_metrics_data( start_time, end_time, self.ssh_handler.get_disk_metrics, constants.ResourceType.DISK) if obj_metrics: for obj_id in obj_metrics.keys(): labels = { 'storage_id': storage_id, 'resource_type': 
constants.ResourceType.DISK,
                    'resource_id': obj_id,
                    'type': 'RAW',
                    'unit': ''
                }
                metric_model_list = self._get_metric_model(
                    metric_list, labels,
                    obj_metrics.get(obj_id), consts.DISK_CAP)
                if metric_model_list:
                    metrics.extend(metric_model_list)
        return metrics

    def get_volume_metrics(self, storage_id, metric_list, start_time,
                           end_time):
        metrics = []
        obj_metrics = {}
        try:
            obj_metrics = self.ssh_format_metrics_data(
                start_time, end_time, self.ssh_handler.get_volume_metrics,
                constants.ResourceType.VOLUME)
        except Exception as err:
            err_msg = "Failed to collect volume metrics: %s" \
                      % (six.text_type(err))
            LOG.warning(err_msg)
        if obj_metrics:
            for obj_id in obj_metrics.keys():
                labels = {
                    'storage_id': storage_id,
                    'resource_type': constants.ResourceType.VOLUME,
                    'resource_id': obj_id,
                    'type': 'RAW',
                    'unit': ''
                }
                metric_model_list = self._get_metric_model(
                    metric_list, labels,
                    obj_metrics.get(obj_id), consts.VOLUME_CAP)
                if metric_model_list:
                    metrics.extend(metric_model_list)
        return metrics

    def ssh_format_metrics_data(self, start_time, end_time,
                                get_obj_metrics, obj_type):
        collect_resource_map = {}
        obj_metrics = get_obj_metrics(start_time, end_time)
        if obj_metrics:
            metric_value = obj_metrics[0]
            last_time = metric_value.get('collect_time', 0)
            first_time = last_time
            time_interval = consts.COLLECT_INTERVAL_HIRES
            while (last_time - time_interval) > start_time:
                next_obj_metrics = get_obj_metrics(
                    start_time, (last_time - time_interval))
                if next_obj_metrics:
                    metric_value = next_obj_metrics[0]
                    last_time = metric_value.get('collect_time', 0)
                    if last_time > start_time:
                        time_interval = first_time - last_time
                        first_time = last_time
                        obj_metrics.extend(next_obj_metrics)
                    else:
                        break
                else:
                    break
        for obj_metric in (obj_metrics or []):
            obj_id = ''
            if obj_type == constants.ResourceType.DISK:
                obj_id = obj_metric.get('pdid')
            elif obj_type == constants.ResourceType.PORT:
                obj_id = '%s:%s:%s' % (
                    obj_metric.get('port_n'), obj_metric.get('port_s'),
                    obj_metric.get('port_p'))
            elif obj_type == constants.ResourceType.VOLUME:
                obj_id = obj_metric.get('vvid')
            if obj_id:
                metric_list = collect_resource_map.setdefault(obj_id, [])
                metric_map = {}
                metric_map['iops'] = float(obj_metric.get('iotot'))
                metric_map['readIops'] = float(obj_metric.get('iord'))
                metric_map['writeIops'] = float(obj_metric.get('iowr'))
                metric_map['throughput'] = round(
                    float(obj_metric.get('kbytestot')) / units.k, 5)
                metric_map['readThroughput'] = round(
                    float(obj_metric.get('kbytesrd')) / units.k, 5)
                metric_map['writeThroughput'] = round(
                    float(obj_metric.get('kbyteswr')) / units.k, 5)
                metric_map['responseTime'] = float(
                    obj_metric.get('svcttot'))
                metric_map['ioSize'] = float(obj_metric.get('iosztot'))
                metric_map['readIoSize'] = float(obj_metric.get('ioszrd'))
                metric_map['writeIoSize'] = float(obj_metric.get('ioszwr'))
                metric_map['collect_timestamp'] = obj_metric.get(
                    'collect_time')
                metric_list.append(metric_map)
        return collect_resource_map
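    # Sketch of the collection windowing above (interval values are
    # illustrative): each batch of samples reports its own collect_time,
    # and the while loop keeps stepping the query window back by
    # time_interval until the oldest fetched sample is no newer than
    # start_time, so a request for [start_time, end_time] is satisfied even
    # when a single CLI/REST call only returns the most recent interval.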
    def rest_format_metrics_data(self, start_time, end_time,
                                 get_obj_metrics, obj_type):
        collect_resource_map = {}
        obj_metrics_list = []
        obj_metrics = get_obj_metrics(start_time, end_time)
        if obj_metrics:
            last_time = obj_metrics.get('sampleTimeSec', 0) * units.k
            first_time = last_time
            time_interval = consts.COLLECT_INTERVAL_HIRES
            metric_members = obj_metrics.get('members')
            if metric_members:
                for member in metric_members:
                    member['collect_timestamp'] = last_time
                obj_metrics_list.extend(metric_members)
            while (last_time - time_interval) > start_time:
                next_obj_metrics = get_obj_metrics(
                    start_time, (last_time - time_interval))
                metric_members = next_obj_metrics.get('members')
                if metric_members:
                    last_time = next_obj_metrics.get(
                        'sampleTimeSec', 0) * units.k
                    if last_time > start_time:
                        time_interval = first_time - last_time
                        first_time = last_time
                        for member in metric_members:
                            member['collect_timestamp'] = last_time
                        obj_metrics_list.extend(metric_members)
                    else:
                        break
                else:
                    break
        for obj_metric in (obj_metrics_list or []):
            obj_id = ''
            if obj_type == constants.ResourceType.STORAGE_POOL:
                obj_id = obj_metric.get('name')
            if obj_id:
                metric_list = collect_resource_map.setdefault(obj_id, [])
                metric_map = {}
                metric_map['iops'] = obj_metric.get('IO').get('total')
                metric_map['readIops'] = obj_metric.get('IO').get('read')
                metric_map['writeIops'] = obj_metric.get('IO').get('write')
                metric_map['throughput'] = round(
                    obj_metric.get('KBytes').get('total') / units.k, 5)
                metric_map['readThroughput'] = round(
                    obj_metric.get('KBytes').get('read') / units.k, 5)
                metric_map['writeThroughput'] = round(
                    obj_metric.get('KBytes').get('write') / units.k, 5)
                metric_map['responseTime'] = obj_metric.get(
                    'serviceTimeMS').get('total')
                metric_map['ioSize'] = obj_metric.get('IOSizeKB').get('total')
                metric_map['readIoSize'] = obj_metric.get('IOSizeKB').get(
                    'read')
                metric_map['writeIoSize'] = obj_metric.get('IOSizeKB').get(
                    'write')
                metric_map['collect_timestamp'] = obj_metric.get(
                    'collect_timestamp')
                metric_list.append(metric_map)
        return collect_resource_map

    def get_latest_perf_timestamp(self):
        latest_time = 0
        disks_metrics_datas = self.ssh_handler.get_disk_metrics(None, None)
        for metrics_data in (disks_metrics_datas or []):
            if metrics_data and metrics_data.get('collect_time'):
                latest_time = metrics_data.get('collect_time')
                break
        return latest_time

    def convert_to_system_time(self, occur_time):
        dateArray = datetime.datetime.utcfromtimestamp(occur_time / units.k)
        otherStyleTime = dateArray.strftime("%Y-%m-%d %H:%M:%SZ")
        timeArray = time.strptime(otherStyleTime, "%Y-%m-%d %H:%M:%SZ")
        timeStamp = int(time.mktime(timeArray))
        hour_offset = (time.mktime(time.localtime()) - time.mktime(
            time.gmtime())) / consts.SECONDS_PER_HOUR
        occur_time = timeStamp * units.k + (
            int(hour_offset) * consts.SECONDS_PER_HOUR) * units.k
        return occur_time

    def list_storage_host_initiators(self, storage_id):
        initiators = self.ssh_handler.list_storage_host_initiators()
        initiators_list = []
        wwn_set = set()
        for initiator in (initiators or []):
            if initiator:
                wwn = initiator.get('wwn/iscsi_name', '').replace('-', '')
                if wwn:
                    if wwn in wwn_set:
                        continue
                    wwn_set.add(wwn)
                    ip_addr = initiator.get('ip_addr')
                    type = constants.InitiatorType.FC
                    if ip_addr and ip_addr != 'n/a':
                        type = constants.InitiatorType.ISCSI
                    initiator_model = {
                        "name": wwn,
                        "storage_id": storage_id,
                        "native_storage_host_initiator_id": wwn,
                        "wwn": wwn,
                        "type": type,
                        "status": constants.InitiatorStatus.ONLINE,
                        "native_storage_host_id": initiator.get(
                            'id', '').replace('-', ''),
                    }
                    initiators_list.append(initiator_model)
        return initiators_list

    def list_storage_hosts(self, storage_id):
        host_datas = self.rest_handler.list_storage_host()
        host_list = []
        if host_datas:
            hosts = host_datas.get('members')
            for host in (hosts or []):
                if host and host.get('name'):
                    descriptors = host.get('descriptors')
                    comment = None
                    os = ''
                    ip_addr = None
                    if descriptors:
                        comment = descriptors.get('comment')
                        os = descriptors.get('os', '')
                        ip_addr = descriptors.get('IPAddr')
                    host_model = {
"name": host.get('name'), "description": comment, "storage_id": storage_id, "native_storage_host_id": host.get('id'), "os_type": consts.HOST_OS_MAP.get( os, constants.HostOSTypes.UNKNOWN), "status": constants.HostStatus.NORMAL, "ip_address": ip_addr } host_list.append(host_model) return host_list def list_storage_host_groups(self, storage_id): host_groups = self.ssh_handler.list_storage_host_groups() host_group_list = [] result = {} if host_groups: hosts_map = self.ssh_handler.get_resources_ids( self.ssh_handler.HPE3PAR_COMMAND_SHOWHOST_D, consts.HOST_OR_VV_PATTERN) for host_group in host_groups: host_members = host_group.get('members') host_ids = [] if hosts_map: for host_name in (host_members or []): host_id = hosts_map.get(host_name) if host_id: host_ids.append(host_id) host_group_model = { "name": host_group.get('name'), "description": host_group.get('comment'), "storage_id": storage_id, "native_storage_host_group_id": host_group.get('id'), "storage_hosts": ','.join(host_ids) } host_group_list.append(host_group_model) storage_host_grp_relation_list = [] for storage_host_group in host_group_list: storage_hosts = storage_host_group.pop('storage_hosts', None) if not storage_hosts: continue storage_hosts = storage_hosts.split(',') for storage_host in storage_hosts: storage_host_group_relation = { 'storage_id': storage_id, 'native_storage_host_group_id': storage_host_group.get( 'native_storage_host_group_id'), 'native_storage_host_id': storage_host } storage_host_grp_relation_list \ .append(storage_host_group_relation) result = { 'storage_host_groups': host_group_list, 'storage_host_grp_host_rels': storage_host_grp_relation_list } return result def list_port_groups(self, storage_id): views = self.ssh_handler.list_masking_views() port_groups_list = [] port_list = [] for view in (views or []): port = view.get('port', '').replace('-', '') if port: if port in port_list: continue port_list.append(port) port_group_model = { "name": "port_group_" + port, "description": "port_group_" + port, "storage_id": storage_id, "native_port_group_id": "port_group_" + port, "ports": port } port_groups_list.append(port_group_model) port_group_relation_list = [] for port_group in port_groups_list: ports = port_group.pop('ports', None) if not ports: continue ports = ports.split(',') for port in ports: port_group_relation = { 'storage_id': storage_id, 'native_port_group_id': port_group.get('native_port_group_id'), 'native_port_id': port } port_group_relation_list.append(port_group_relation) result = { 'port_groups': port_groups_list, 'port_grp_port_rels': port_group_relation_list } return result def list_volume_groups(self, storage_id): volume_groups = self.ssh_handler.list_volume_groups() volume_group_list = [] result = {} if volume_groups: volumes_map = self.ssh_handler.get_resources_ids( self.ssh_handler.HPE3PAR_COMMAND_SHOWVV, consts.HOST_OR_VV_PATTERN) for volume_group in volume_groups: volume_members = volume_group.get('members') volume_ids = [] if volumes_map: for volume_name in (volume_members or []): volume_id = volumes_map.get(volume_name) if volume_id: volume_ids.append(volume_id) volume_group_model = { "name": volume_group.get('name'), "description": volume_group.get('comment'), "storage_id": storage_id, "native_volume_group_id": volume_group.get('id'), "volumes": ','.join(volume_ids) } volume_group_list.append(volume_group_model) volume_group_relation_list = [] for volume_group in volume_group_list: volumes = volume_group.pop('volumes', None) if not volumes: continue volumes = volumes.split(',') for 
volume in volumes: volume_group_relation = { 'storage_id': storage_id, 'native_volume_group_id': volume_group.get('native_volume_group_id'), 'native_volume_id': volume} volume_group_relation_list.append(volume_group_relation) result = { 'volume_groups': volume_group_list, 'vol_grp_vol_rels': volume_group_relation_list } return result def list_masking_views(self, storage_id): views = self.ssh_handler.list_masking_views() views_list = [] if views: hosts_map = self.ssh_handler.get_resources_ids( self.ssh_handler.HPE3PAR_COMMAND_SHOWHOST_D, consts.HOST_OR_VV_PATTERN) hosts_group_map = self.ssh_handler.get_resources_ids( self.ssh_handler.HPE3PAR_COMMAND_SHOWHOSTSET_D, consts.HOST_OR_VV_PATTERN) volumes_map = self.ssh_handler.get_resources_ids( self.ssh_handler.HPE3PAR_COMMAND_SHOWVV, consts.HOST_OR_VV_PATTERN) volumes_group_map = self.ssh_handler.get_resources_ids( self.ssh_handler.HPE3PAR_COMMAND_SHOWVVSET_D, consts.HOST_OR_VV_PATTERN) host_vv_set = set() for view in views: vv_name = view.get('vvname') host_name = view.get('hostname') if vv_name and host_name: host_vv_key = '%s_%s' % (host_name, vv_name) host_vv_key = host_vv_key.replace(' ', '') if host_vv_key in host_vv_set: continue host_vv_set.add(host_vv_key) port = view.get('port', '').replace('-', '') lun_id = view.get('lun') wwn = view.get('host_wwn/iscsi_name', '').replace('-', '') native_port_group_id = None if port: lun_id = '%s_%s' % (lun_id, port) native_port_group_id = 'port_group_%s' % port if wwn: lun_id = '%s_%s' % (lun_id, wwn) lun_id = '%s_%s' % (lun_id, host_vv_key) view_model = { 'native_masking_view_id': lun_id, "name": view.get('lun'), 'native_port_group_id': native_port_group_id, "storage_id": storage_id } if 'set:' in vv_name: vv_set_id = volumes_group_map.get( vv_name.replace('set:', '')) view_model['native_volume_group_id'] = vv_set_id else: vv_id = volumes_map.get(vv_name) view_model['native_volume_id'] = vv_id if 'set:' in host_name: host_set_id = hosts_group_map.get( host_name.replace('set:', '')) view_model[ 'native_storage_host_group_id'] = host_set_id else: host_id = hosts_map.get(host_name) view_model['native_storage_host_id'] = host_id if (view_model.get('native_storage_host_id') or view_model.get('native_storage_host_group_id')) \ and (view_model.get('native_volume_id') or view_model.get('native_volume_group_id')): views_list.append(view_model) return views_list ================================================ FILE: delfin/drivers/hpe/hpe_3par/consts.py ================================================ # Copyright 2020 The SODA Authors. # Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
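# Example of how the alert codes below are consumed (the trap value is
# illustrative): AlertHandler.get_alert_type formats a decimal SNMP message
# code with "0x%07x" and looks it up in HPE3PAR_ALERT_CODE, e.g.
# int('196609') -> '0x0030001' -> 'Firmware coredump event'.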
# CPG's status from delfin.common import constants STATUS_POOL_NORMAL = 1 # CPG STATUS Normal operation STATUS_POOL_DEGRADED = 2 # CPG STATUS Degraded state STATUS_POOL_FAILED = 3 # CPG STATUS Abnormal operation STATUS_POOL_UNKNOWN = 99 # CPG STATUS Unknown state # VOLUME's status STATUS_VOLUME_NORMAL = 1 # VOLUME STATUS Normal operation STATUS_VOLUME_DEGRADED = 2 # VOLUME STATUS Degraded state STATUS_VOLUME_FAILED = 3 # VOLUME STATUS Abnormal operation STATUS_VOLUME_UNKNOWN = 99 # VOLUME STATUS Unknown state # VOLUME's type THIN_LUNTYPE = 2 # TPVV 2 • TPVV, # VOLUME's Compression status STATUS_COMPRESSION_YES = 1 # Compression is enabled on the volume # VOLUME's deduplication status STATUS_DEDUPLICATIONSTATE_YES = 1 # Enables deduplication on the volume # Page size per page at default paging QUERY_PAGE_SIZE = 150 # Connection timeout LOGIN_SOCKET_TIMEOUT = 10 SOCKET_TIMEOUT = 10 # 403 The client request has an invalid session key. # The request came from a different IP address ERROR_SESSION_INVALID_CODE = 403 # 409 Session key is being used. ERROR_SESSION_IS_BEING_USED_CODE = 409 # http SUCCESS's status SUCCESS_STATUS_CODES = 200 # session SUCCESS's status LOGIN_SUCCESS_STATUS_CODES = 201 SERVICE_UNAVAILABLE_CODES = 503 BAD_REQUEST_CODES = 400 NOT_IMPLEMENTED_CODES = 501 # alert state enumeration ALERT_STATE_NEW = 1 # New. ALERT_STATE_ACKED = 2 # Acknowledged state. ALERT_STATE_FIXED = 3 # Alert issue fixed. ALERT_STATE_UNKNOWN = 99 # Unknown state # alert severity enumeration ALERT_SEVERITY_CRITICAL = 2 ALERT_SEVERITY_MAJOR = 3 ALERT_SEVERITY_MINOR = 4 ALERT_SEVERITY_DEGRADED = 5 # alert code HPE3PAR_ALERT_CODE = { '0x0000000': 'Node CPU Thermal Status', '0x0010001': 'Serial link event', '0x0010002': 'Serial link fail FIFO full', '0x0010003': 'Serial link fail full loss', '0x0010004': 'Serial link fail rate loss', '0x0020001': 'Active VLUN Limit Exceeded', '0x0020002': 'System Reporter VLUN performance (major alert)', '0x0020003': 'System Reporter VLUN performance (critical alert)', '0x0020004': 'System Reporter VLUN performance (minor alert)', '0x0020005': 'System Reporter VLUN performance (info alert)', '0x0030001': 'Firmware coredump event', '0x0030002': 'Too many WWNs on an RCFC port', '0x0030003': 'Host [[sw_port]] experienced over 50 CRC ' 'errors () in 24 hours', '0x0030005': 'FC Port Error', '0x0030006': 'FC Port Loop Connection Type Not Supported', '0x0030007': 'RCFC port sees non-3PAR WWNs', '0x0030009': 'Excessive retransmits on RCFC port', '0x0030010': 'Port Device Count Exceeded', '0x0030011': 'CRC error on RCIP port', '0x0030012': 'Unsupported SATA Drive', '0x0030013': 'Unsupported SAS Device', '0x0030014': 'Multiple SAS Initiators', '0x0030015': 'System Reporter port performance (major alert)', '0x0030016': 'Disk Port has exceeded IO error threshold', '0x0030017': 'System Reporter port performance (critical alert)', '0x0030018': 'System Reporter port performance (minor alert)', '0x0030019': 'System Reporter port performance (info alert)', '0x00300de': 'Component state change', '0x00300fa': 'Component state change', '0x0040001': 'Metadata inconsistency in a VV', '0x0040003': 'Admin Volume I/O timeout', '0x0040004': 'VV availability', '0x0040005': 'Pinned DCOWs', '0x0040006': 'Aborted DCOWs', '0x0040007': 'Recovery scan found corrupt log', '0x0040008': 'vlmap count exceeds threshold', '0x0040009': 'FlashCache performance degradation', '0x004000a': 'VV unrecovered DIF error', '0x004000b': 'Metadata inconsistency in a Deduplication Group', '0x004000c': 'VV unrecovered DIF 
error', '0x004000d': 'System Reporter VV space major alert', '0x004000e': 'System Reporter VV space critical alert', '0x004000f': 'System Reporter VV space minor alert', '0x0040010': 'System Reporter VV space info alert', '0x0040011': 'Flash Cache Creation Failure', '0x0040012': 'SD Metadata inconsistency in a VV', '0x0040013': 'Compression is not enabled for Volumes less than 16GB', '0x0040014': 'System VV detected', '0x00400de': 'Component state change', '0x00400fa': 'Component state change', '0x0050002': 'Ldsk has failed set', '0x0050003': 'LD check summary message', '0x0050004': 'LD availability has reduced', '0x0050005': 'Log LD raid set failure.', '0x0050006': 'System Reporter LD performance (major alert)', '0x0050007': 'LD check inconsistent', '0x0050008': 'LD check failed LD not consistent', '0x0050009': 'LD check consistent', '0x005000a': 'LD check changed logical disk', '0x005000b': 'System Reporter LD performance (critical alert)', '0x005000c': 'System Reporter LD performance (minor alert)', '0x005000d': 'System Reporter LD performance (info alert)', '0x005000f': 'System Reporter LD space critical alert', '0x0050010': 'System Reporter LD space minor alert', '0x0050011': 'System Reporter LD space info alert', '0x0050012': 'System Reporter LD space major alert', '0x0060001': 'Disk fail alert', '0x0060002': 'Disk monitor stopped', '0x0060003': 'Invalid PD configuration', '0x0060007': '42 Alerts', '0x0060008': 'Disk overtemp warning', '0x0060009': 'Disk overtemp alert', '0x006000a': 'Chunklet relocation failure', '0x006000b': 'System Reporter PD performance (major alert)', '0x006000c': 'System overtemp', '0x006000d': 'Disk overtemp warning', '0x006000e': 'Disk overtemp alert', '0x0060011': 'Disk overtemp but not spundown', '0x0060012': 'Disk overtemp and spundown', '0x0060013': 'Disk overtemp but not spundown no DSK', '0x0060014': 'Disk overtemp and spundown no DSK', '0x0060015': 'System Reporter PD space major alert', '0x0060016': 'System Reporter PD space critical alert', '0x0060017': 'System Reporter PD space minor alert', '0x0060018': 'System Reporter PD space info alert', '0x0060019': 'System Reporter PD performance critical alert', '0x006001a': 'System Reporter PD performance minor alert', '0x006001b': 'System Reporter PD performance info alert', '0x00600de': 'Component state change', '0x00600fa': 'Component state change', '0x0070001': 'No free chunklet found for relocation', '0x0070002': 'No spare chunklet found for relocation', '0x0080001': 'Could not process SCSI DB', '0x0090001': 'Host Path Status Change', '0x00900de': 'Component state change', '0x00a0005': 'Snap Admin Volume low on space, degraded', '0x00a0006': 'Snap Data Volume low on space, degraded', '0x00a0007': 'Second snap Data Volume low on space, degraded', '0x00b0001': 'Kernel crashdump event', '0x00b0002': 'Kernel crashdump with error', '0x00c0001': 'Process has exited', '0x00c0002': 'Process cannot be started', '0x00c0003': 'Process coredump event', '0x00c0004': 'Attempt to run grub failed', '0x00c0005': 'Attempt to run grub failed, PM not starting', '0x00c0006': 'Attempt to run grub failed, retval', '0x00c0007': 'Process coredump with error', '0x00d0001': 'Corrupt PR table found', '0x00d0002': 'PR transition', '0x00d0003': 'PR transition, degraded.', '0x00e0001': 'Double node failure', '0x00e0002': 'System manager cannot startup', '0x00e0003': 'Node recovery powerfail event', '0x00e0004': ' use of golden license', '0x00e0005': 'License key usage, license expired', '0x00e0006': 'System recovery notification 
about bad volume', '0x00e0007': 'Pfail partition needs to be wiped', '0x00e0008': 'Power fail saved version mismatch', '0x00e0009': 'Failed to save task data', '0x00e000a': 'Task failed', '0x00e000b': 'Pfail recovery continued with failed previous NM1 recovery', '0x00e000d': 'System recovery stalled due to unknown replicant state', '0x00e000e': 'System recovery stalled due to sole owner of ld missing', '0x00e0011': '"servicemag start" operation has completed', '0x00e0012': '"servicemag resume" operation has completed', '0x00e0014': 'Battery States', '0x00e0015': 'Node not integrated', '0x00e0016': 'System recovery stalled due to unstarted vvs', '0x00e0017': 'TOC corruption detected', '0x00e0018': 'Pfail Recovery with a missing VV', '0x00e0019': 'Pfail Recovery with VV in bad state', '0x00e001a': 'Pfail Recovery skipped due to multiple NM1 nodes', '0x00e001b': 'NM1 pfail recovery proceeding with missing replicant', '0x00e001c': 'Configuration lock hold time', '0x00e001d': 'Inconsistent TOC object removed', '0x00e001e': 'Invalid VVMEMB(s) resolved', '0x00e001f': '"servicemag resume" operation has passed ' 'with dismissed disks', '0x00e0020': '"servicemag resume" operation has passed ' 'without dismissing any disks', '0x00e0021': '"servicemag resume" operation has failed ' 'with no error message', '0x00e0022': '"servicemag resume" operation has failed to admit disk', '0x00e0023': '"servicemag resume" operation has failed ' 'unrecoverable disk', '0x00e0024': '"servicemag resume" operation has failed to ' 'relocate_chunklets', '0x00e0025': 'System manager cannot start up, TOC not found', '0x00e0026': 'System manager cannot start up, waiting on nodes', '0x00e0027': 'System manager cannot start up, manual start up set', '0x00e0028': 'System manager cannot start up, TOC quorum not met', '0x00e0029': 'System manager cannot start up, waiting for ' 'nodes to recover', '0x00e002a': 'Pfail partition needs to be wiped', '0x00e002b': 'Pfail partition needs to be wiped', '0x00e002c': 'System manager cannot start up, incomplete powerfail', '0x00e002d': 'System manager cannot start up, TOC quorum found, ' 'incomplete powerfail', '0x00e002e': 'System manager cannot start up, TOC quorum found, ' 'waiting for nodes to recover', '0x00e002f': 'System manager cannot start up, waiting for nodes ' 'to recover', '0x00e0030': 'Unexpected encryption state on node drive', '0x00e0031': '"servicemag start" failed', '0x00e0032': 'Single node WBC is active', '0x00e0033': 'Single node WBC is expired', '0x0100001': 'Online upgrade', '0x0100002': 'Unresponsive IOCTL', '0x0100003': 'Update available', '0x0100004': 'Update status', '0x0100005': 'Update install status', '0x0100006': 'Unresponsive IOCTL Verbose', '0x0110001': 'Errors accessing the IDE disk', '0x0110002': 'IDE disk error handling', '0x0110004': 'Version mismatch event', '0x0110005': 'Serial comm init failed', '0x0110006': 'IDE disk error node shutdown', '0x0110007': 'IDE disk error node not shutdown', '0x0110008': 'IDE disk error node not shutdown LDs cannot be served', '0x0110009': 'IDE disk error node reboot', '0x011000a': 'Version mismatch event for svcalert', '0x011000b': 'Version mismatch event', '0x011000c': 'Version mismatch event', '0x0130001': 'Too many alerts in the system', '0x0140001': 'Notification', '0x0140003': 'fork(2) call failed', '0x0140004': 'System Reporter QoS performance (major alert)', '0x0140005': 'SFP Unqualified Notification', '0x0140007': 'System upgrade cancelled', '0x0140008': 'System upgrade Cancellation Failed', '0x0140009': 'System 
serial number could not be determined', '0x014000a': 'DC3 I2C Lockup Reset Succeeded', '0x014000b': 'DC3 I2C Lockup Reset Failed', '0x014000c': 'admitpd not allowed on Emulex generated wwn', '0x014000d': 'admitpd not allowed on toto-sata generated wwn', '0x014000e': 'RAID 0 LD failed due to stale chunklet', '0x014000f': 'Mismatch of failed chunklet information', '0x0140010': 'System Reporter QoS performance (critical alert)', '0x0140011': 'System Reporter QoS performance (minor alert)', '0x0140012': 'System Reporter QoS performance (info alert)', '0x0150004': 'CLI server cannot communicate with system manager', '0x0150005': 'CLI internal error using authentication library', '0x0150006': 'Authentication failure', '0x0150007': 'CLI internal error', '0x015000c': 'CPG free space limit', '0x015000d': 'CLI client process event', '0x015000f': 'Relocatepd request', '0x0150010': 'Control Recovery Auth Ciphertext Export', '0x0150011': 'CLI server process event, max tpdtcl exceeded', '0x0150012': 'CLI server process event, twice max tpdtcl exceeded', '0x0150013': 'CLI server process event, max CLI server exceeded', '0x0150014': 'CLI server process event, max local exceeded', '0x0150015': 'CLI server process event, max server exceeded brief', '0x0150016': 'CLI server process event, max server exceeded local', '0x0150017': 'CLI server process event, error in track', '0x0150018': 'CLI server process event, error in store user name', '0x0150019': 'CLI server process event, svcalert brief', '0x015001a': 'CLI server process event, svcalert', '0x015001b': 'CLI internal error Failed sanity check', '0x015001c': 'CLI internal error sqlite database', '0x015001d': 'CLI internal error SQLite DB', '0x015001f': 'CLI client process event disk high temp', '0x0150020': 'Unable to send an event to the security syslog server.', '0x0150021': 'Connection has been reestablished to the ' 'security syslog server.', '0x0150022': 'Slow Disk temperature unavailable', '0x0170001': 'TOC update', '0x0170004': 'TOC update, not above error threshold and decreased.', '0x0170005': 'TOC update, not above warn threshold and decreased.', '0x0190001': 'ea msg timeout', '0x0190002': 'Pre Integration Link Test Error', '0x01a0001': 'CPU Memory Correctable ECC', '0x01a0002': 'Node is offline', '0x01a0003': 'Node Time of Day Battery', '0x01a0005': 'HW: CPU Memory Correctable ECC', '0x01a0006': 'CPU Configuration', '0x01a0007': 'BIOS IDE log entry', '0x01a0008': 'Node Environmental Check Pass', '0x01a0009': 'IDE file integrity check results', '0x01a000b': 'Eagle memory uerr', '0x01a000c': 'Eagle memory muerr', '0x01a000d': 'Eagle memory cerr', '0x01a000e': 'Eagle internal system error', '0x01a000f': 'Eagle hardware watchdog error', '0x01a0010': 'Eagle PCI error', '0x01a0011': 'Eagle driver software error', '0x01a0012': 'Memory usage information', '0x01a0014': 'Too many TCP segment retransmits', '0x01a0015': 'Node PCIe Correctable Error Status', '0x01a0016': 'Node PCIe Link Status', '0x01a0017': 'Too many TCP segment errors', '0x01a0019': 'Cluster thermal shutdown', '0x01a001a': 'Link Configuration Mismatch', '0x01a001b': 'Unexpected Cable Event', '0x01a001c': 'Link establish alert', '0x01a001d': 'Core File Received From Remote/Local MCU', '0x01a001f': 'Node Needs to Shutdown', '0x01a0021': 'Node Rescue', '0x01a0022': 'Node-Failure-Analysis File Received From Remote/Local MCU', '0x01a0024': 'Slab usage information', '0x01a0025': 'System Reporter cmp performance (major alert)', '0x01a0026': 'System Reporter CPU performance (major alert)', 
'0x01a0027': 'System Reporter link performance (major alert)', '0x01a0028': 'Node ID Mismatch', '0x01a0029': 'Remote Node ID Mismatch', '0x01a002a': 'System Model Mismatch', '0x01a002b': 'Remote System Model Mismatch', '0x01a002c': 'Node Type Mismatch', '0x01a002d': 'Remote Node Type Mismatch', '0x01a002e': 'SSN Mismatch', '0x01a002f': 'Remote SSN Mismatch', '0x01a0031': 'Node Rescue User Abort', '0x01a0032': 'Node Rescue Invalid', '0x01a0033': 'Node Rescue Internal Communication Error', '0x01a0034': 'Node Rescue No Rejoin', '0x01a0035': 'Node Rescue Port 80 Blocked', '0x01a0036': 'Node Rescue Port 69 Blocked', '0x01a0037': 'Node Rescue Port 873 Blocked', '0x01a0038': 'Node Rescue No Backplane Connection', '0x01a0039': 'CMP Threshold', '0x01a003a': 'DIF error', '0x01a003b': 'IDE file integrity check bad run', '0x01a003c': 'IDE file integrity check bad', '0x01a003d': 'IDE file integrity check very bad', '0x01a003e': 'System Reporter cache performance alert', '0x01a003f': 'Legacy System Model Mismatch', '0x01a0040': 'Remote System Model Mismatch', '0x01a0041': 'Node Rescue Detected Dual Boot Node Drive Size Mismatch', '0x01a0042': 'Node Environmental Check Fail', '0x01a0043': 'Node Thermal Status svc alert', '0x01a0044': 'Node Needs to Shutdown svc alert', '0x01a0045': 'Node Thermal Status Alert', '0x01a0046': 'Node Thermal Status Warning', '0x01a0047': 'System Reporter cmp performance (critical alert)', '0x01a0048': 'System Reporter cmp performance (minor alert)', '0x01a0049': 'System Reporter cmp performance (info alert)', '0x01a004a': 'System Reporter CPU performance (critical alert)', '0x01a004b': 'System Reporter CPU performance (minor alert)', '0x01a004c': 'System Reporter CPU performance (info alert)', '0x01a004d': 'System Reporter link performance (critical alert)', '0x01a004e': 'System Reporter link performance (minor alert)', '0x01a004f': 'System Reporter link performance (info alert)', '0x01a0050': 'System Reporter cache performance (critical alert)', '0x01a0051': 'System Reporter cache performance (minor alert)', '0x01a0052': 'System Reporter cache performance (info alert)', '0x01a0053': 'Eagle link error', '0x01a0054': 'System Series Mismatch', '0x01a0055': 'Remote System Series Mismatch', '0x01a0056': 'Node temporary filesystem in use', '0x01a0057': 'Node rescue detected that rescuee node has an ' 'incompatible board series', '0x01a00de': 'Component state change', '0x01a00fa': 'Component state change', '0x01b0001': 'Power Supply', '0x01b0002': 'Power Supply DC Status', '0x01b0003': 'Power Supply AC Status', '0x01b0004': 'Power Supply Fan Status', '0x01b0005': 'Power Supply Charger Status', '0x01b0009': 'Power Supply Type Mismatch', '0x01b0015': 'VSC 055 Interrupt Error', '0x01b00de': 'Component state change', '0x01b00fa': 'Component state change', '0x01d0001': 'Bios eeprom log events', '0x01e0001': 'Cage log event', '0x01e0005': 'Cage coredump event', '0x01e0006': 'servicemag failed to dismiss PD: ' 'cage , mag , ' 'taskid , pd : error - ', '0x01e0007': 'Critical ESI port count, down to one', '0x01e0008': 'Critical ESI port count, one valid', '0x01e0009': 'Critical ESI port count, lost', '0x01e000a': 'Invalid cage isolated configuration', '0x01e000b': 'Invalid cage isolated configuration', '0x01e000c': 'Invalid cage mixed configuration', '0x01e000d': 'Invalid cage unknown configuration', '0x01e000e': 'Invalid cage partners configuration', '0x01e000f': 'Invalid cage maxcage configuration', '0x01e0010': 'Invalid cage twice configuration', '0x01e0011': 'Unknown cage 
configuration', '0x01e0012': 'Cage coredump event - detailed - 0', '0x01e0013': 'Cage coredump event - detailed - 1', '0x01e0014': 'Cage coredump event - detailed - 2', '0x01e0015': 'Cage coredump event - detailed - 3', '0x01e0016': 'Cage coredump event - very detailed - 0', '0x01e0017': 'Cage coredump event - very detailed - 1', '0x01e0018': 'Cage log event, firmware panic', '0x01e0019': 'Cage log event, midplane esi', '0x01e001a': 'Cage log event, midplane', '0x01e001b': 'Cage log event, post', '0x01e001c': 'Cage log event, midplane lm87', '0x01e001d': 'Cage log event, midplane pmc', '0x01e00de': 'Component state change', '0x01e00fa': 'Component state change', '0x01f0001': 'Mixing SSDs with different RPMs not supported', '0x01f00de': 'Component state change', '0x01f00fa': 'Component state change', '0x0200006': 'GUI server can not communicate with the system manager', '0x0200009': 'Internal error in authentication library', '0x0210001': 'InForm GUI has lost connection to the event filter', '0x0220001': 'Battery expiring soon', '0x0220010': 'Assert Battery FAIL', '0x0220014': 'Battery Type Mismatch', '0x0220017': 'Battery expiration soon', '0x02200de': 'Component state change', '0x02200fa': 'Component state change', '0x0230003': 'Port shutdown on fatal error', '0x0230004': 'Host port is down', '0x0230005': 'All ports in the same FC card must be configured for RCFC', '0x0230006': 'HBA fw file status', '0x0230007': 'HBA FW error opening file', '0x0230008': 'HBA FW error reading file', '0x0230009': 'HBA FW unsupported file', '0x0240002': 'Internodal Serial Port Receiver Timeout Error', '0x0240003': 'Internodal Serial Port Default Error', '0x0250002': 'Remote Copy link status', '0x0250007': 'System Reporter RC Target performance (major alert)', '0x0250008': 'System Reporter RC VV performance (major alert)', '0x0250009': 'Remote Copy group in failsafe state', '0x025000a': 'Replication resource usage exceeded - Group "Logging".', '0x025000b': 'Replication resource usage exceeded - Group "Stopped".', '0x025000c': 'Replication resources restored - Group transition ' 'from Logging failure', '0x025000d': 'System Reporter RC VV performance (critical alert)', '0x025000e': 'System Reporter RC VV performance (minor alert)', '0x025000f': 'System Reporter RC VV performance (info alert)', '0x0250011': 'System Reporter RC Target performance (critical alert)', '0x0250012': 'System Reporter RC Target performance (minor alert)', '0x0250013': 'System Reporter RC Target performance (info alert)', '0x0250014': 'Remote Copy group status alert', '0x0250015': 'Remote Copy group status fail', '0x0250016': 'Quorum is not in Started state', '0x0260001': 'Ethernet Monitor Event', '0x0260002': 'No admin network interface discovered', '0x0270001': 'TP VV allocation size warning', '0x0270002': 'TP VV allocation size limit', '0x0270003': 'Snapshot space allocation size warning', '0x0270004': 'Snapshot space allocation size limit', '0x0270005': 'CPG growth warning', '0x0270006': 'CPG growth limit', '0x0270007': 'TP VV allocation failure', '0x0270008': 'Snapshot space allocation failure', '0x0270009': 'CPG growth failure', '0x027000e': 'FC raw space allocation 50% alert', '0x027000f': 'FC raw space allocation 75% alert', '0x0270010': 'FC raw space allocation 85% alert', '0x0270011': 'FC raw space allocation 95% alert', '0x0270012': 'CPG space used status', '0x0270013': 'Raw space allocation user configured alert', '0x0270014': 'NL raw space allocation 50% alert', '0x0270015': 'NL raw space allocation 75% alert', '0x0270016': 
'NL raw space allocation 85% alert', '0x0270017': 'NL raw space allocation 95% alert', '0x0270018': 'CPG was grown with degraded parameters', '0x0270019': 'SSD raw space allocation 50% alert', '0x027001a': 'SSD raw space allocation 75% alert', '0x027001b': 'SSD raw space allocation 85% alert', '0x027001c': 'SSD raw space allocation 95% alert', '0x027001d': 'CPG growth failure non-admin', '0x027001e': 'CPG growth non admin limit', '0x027001f': 'CPG growth non admin warning', '0x0270020': 'Overprovisioning CPG warning alert', '0x0270021': 'Overprovisioning CPG limit alert', '0x0270022': 'Overprovisioning warning alert', '0x0270023': 'Overprovisioning limit alert', '0x0270024': 'System Reporter CPG space critical alert', '0x0270025': 'System Reporter CPG space minor alert', '0x0270026': 'System Reporter CPG space info alert', '0x0270027': 'System Reporter CPG space major alert', '0x0280001': 'Preserved data LDs configuration', '0x0280002': 'Preserved data LDs unavailable', '0x0280003': 'Preserved data LDs are filling up', '0x0280004': 'Preserved data LDs are full', '0x0280005': 'LD availability', '0x0280006': 'Preserved data LDs status, mangler class', '0x0280007': 'Preserved data LDs configuration, Not configured', '0x0280008': 'Preserved data LDs configuration, Not started', '0x02900de': 'Component state change', '0x02a00de': 'Component state change', '0x02a00fa': 'Component state change', '0x02b00de': 'Component state change', '0x02b00fa': 'Component state change', '0x02d00de': 'Component state change', '0x02d00fa': 'Component state change', '0x03500de': 'Component state change', '0x03500fa': 'Component state change', '0x0360002': 'Write Cache Availability', '0x0360003': 'System Reporter system space critical alert', '0x0360004': 'System Reporter system space major alert', '0x0360005': 'System Reporter system space info alert', '0x0360006': 'System Reporter system space minor alert', '0x03700de': 'Component state change', '0x03700fa': 'Component state change', '0x03800de': 'Component state change', '0x03900fa': 'Component state change', '0x03a00de': 'Component state change', '0x03a00fa': 'Component state change', '0x03b0002': 'Free node disk space low', '0x03b0004': 'Node drive is encrypted but encryption is ' 'not enabled on the system', '0x03b0005': 'Encryption is enabled on the system but the ' 'node drive is not encrypted', '0x03b0006': 'Unable to do I/O to the node drive', '0x03b0007': 'Free node disk space low, /common not mounted', '0x03b0008': 'Free node disk space low, /altroot not mounted', '0x03b0009': 'Free node disk space low, /common and /altroot not mounted', '0x03b000a': 'Syslog Node Drive Failure Message Monitoring', '0x03b000b': 'Periodic /proc/mdstat Monitoring ' 'Detected Degraded Node Drive Raid', '0x03b000c': 'Lost interrupt', '0x03b000d': 'IDE SMART failed self check', '0x03b000e': 'IDE SMART unreadable sectors', '0x03b000f': 'IDE SMART uncorrectable sectors', '0x03b0010': 'IDE SMART failed unit ready', '0x03b0011': 'IDE SMART failed usage attribute', '0x03b0012': 'IDE SMART failure', '0x03b0013': 'IDE SMART execute test failed', '0x03b0014': 'IDE SMART new self test log error', '0x03b0015': 'IDE SMART repeat self test log error', '0x03b0016': 'IDE SMART ATA error increase', '0x03b0017': 'IDE SMART attribute data read fail', '0x03b0019': 'IDE SMART error log read fail', '0x03b0020': 'DUAL IDE SMART failed self check', '0x03b0021': 'DUAL IDE SMART unreadable sectors', '0x03b0022': 'DUAL IDE SMART uncorrectable sectors', '0x03b0023': 'DUAL IDE SMART failed unit ready', 
'0x03b0024': 'DUAL IDE SMART failed usage attribute', '0x03b0025': 'DUAL IDE SMART failure', '0x03b0026': 'DUAL IDE SMART execute test failed', '0x03b0027': 'DUAL IDE SMART new self test log error', '0x03b0028': 'DUAL IDE SMART repeat self test log error', '0x03b0029': 'DUAL IDE SMART ATA error increase', '0x03b002a': 'DUAL IDE SMART attribute data read fail', '0x03b002b': 'DUAL IDE SMART error log read fail', '0x03f0001': 'Process appears unresponsive', '0x03f0002': 'Process name appears unresponsive', '0x03f0003': 'Process event handling appears unresponsive', '0x0450001': 'Data Cache DIMM CECC Monitoring', '0x0450002': 'Patrol Data Cache DIMM UERR', '0x0460001': 'Control Cache DIMM Temperature', '0x0460002': 'Control Cache DIMM Temperature', '0x0460003': 'Node FB-DIMM AMB Correctable Error Status', '0x04a0001': 'Slot PCIe Correctable Error Status', '0x04a0002': 'Slot PCIe Link Status', '0x04e0001': 'Rejecting SSH Connection', '0x04e0002': 'Rejecting SSH Connection from IP', '0x0500001': 'A system task failed', '0x05d00de': 'Component state change', '0x05d00fa': 'Component state change', '0x0600005': 'WSAPI internal error using authentication library', '0x06200fa': 'Component state change', '0x0640001': 'PD Scrub', '0x0660001': 'SED is from the wrong system', '0x0660002': 'SED has the wrong key', '0x0660003': 'SED is present, but encryption is not enabled', '0x0660004': 'LKM is in an unknown state', '0x0660005': 'MMAP failed to map the segment of the memory with keys', '0x0660006': 'Nodesvr unresponsive during darsvr startup', '0x0660007': 'Nodesvr unresponsive during fipsvr startup', '0x0660008': 'fipsvr unable to start in FIPS mode', '0x0660009': 'Failed to successfully communicate with EKM at startup', '0x066000a': 'Controlencryption restore failed', '0x066000b': 'Controlencryption restore ignore failed', '0x066000c': 'Controlencryption restore ignore succeeded with failures', '0x066000d': 'Encryption operation attempted on drive with WWN 0', '0x066000e': 'Unsupported drive present in the system', '0x06700de': 'Component state change', '0x0680001': 'Quorum Witness', '0x06e0001': 'File Services state change', '0x0720001': 'File Provisioning Group', '0x0740001': 'File Store', '0x0750001': 'Virtual Server IP Address', '0x0760001': 'Node Network Bond', '0x0770001': 'Node Network Interface', '0x0780001': 'Node IP Address', '0x0790001': 'File Service Node Active Directory Configuration', '0x07e0001': 'Anti-Virus VSE Server', '0x0810001': 'Anti-Virus Scan', '0x0820001': 'Virtual Server Certificate', '0x0840001': 'HTTP Share', '0x0850001': 'NFS Share', '0x0860001': 'SMB Share', '0x0870001': 'User Quota', '0x08b0001': 'File Store Snapshot', '0x08c0001': 'File Provisioning Group Snap Reclamation Task', '0x08d0001': 'Overall File Services for Node', '0x08e0001': 'File Services Software Update', '0x08f0001': 'File Services Log Collection', '0x0900001': 'File Service Virtual Server Backup', '0x0960002': 'Vasa Provider migration failed due to VVol SC migration', '0x0960003': 'Vasa Provider migration failed due ' 'to Certificate mode migration', '0x0960004': 'Vasa Provider migration failed while updating config file', '0x0960005': 'VASA provider could not start because of ' 'issues with the VASA Certificate', '0x0990001': 'Static IP Route', '0x09a0001': 'SMB Global Setting State change event', '0x09b0001': 'Ddcscan Monitoring', '0x09d0001': 'NVDIMM Battery Failure', '0x09e0003': 'Management Module High Temperature', '0x09e0004': 'Management Module not responding', '0x09f0001': 'File Persona VM 
shutdown', '0x09f0002': 'File Persona CPG grow limit warning', '0x0a50001': 'File Access Auditing Alerts' } NODE_PATTERN = "^\\s*Node\\s+[-]*Name[-]*\\s+[-]*State[-]*\\s+" CPU_PATTERN = "^\\s*Node\\s+CPU" DISK_PATTERN = "^\\s*Id\\s+[-]*CagePos[-]*\\s+[-]*Type[-]*\\s+RPM\\s+State\\s+" DISK_I_PATTERN = "^\\s*Id\\s+[-]*CagePos[-]*\\s+[-]*State[-]*\\s+" \ "[-]*Node_WWN[-]*\\s+[-]*MFR[-]*\\s+[-]*Model[-]*\\s+" \ "[-]*Serial[-]*\\s+[-]*FW_Rev[-]*" PORT_PATTERN = "^\\s*N:S:P\\s+[-]*Mode[-]*\\s+[-]*State[-]*\\s+[-]*" \ "Node_WWN[-]*\\s+[-]*Port_WWN/HW_Addr[-]*\\s+" PORT_I_PATTERN = "^\\s*N:S:P\\s+Brand\\s+Model\\s+Rev\\s+Firmware\\s+" \ "Serial\\s+HWType" PORT_PER_PATTERN = "^\\s*N:S:P\\s+Connmode\\s+ConnType\\s+CfgRate\\s+MaxRate" PORT_C_PATTERN = "^\\s*N:S:P\\s+Mode\\s+Device\\s+Pos\\s+Config\\s+" \ "Topology\\s+Rate" PORT_ISCSI_PATTERN = "^\\s*N:S:P\\s+State\\s+IPAddr\\s+Netmask/PrefixLen\\s+" \ "Gateway" PORT_RCIP_PATTERN = "^\\s*N:S:P\\s+State\\s+[-]*HwAddr[-]*\\s+IPAddr\\s+" \ "Netmask\\s+Gateway\\s+MTU\\s+Rate" PORT_FCOE_PATTERN = "^\\s*N:S:P\\s+State\\s+" PORT_FS_PATTERN = "^\\s*N:S:P\\s+State\\s+" FPG_PATTERN = "^\\s*FPG\\s+[-]*Mountpath[-]*\\s+[-]*Size[-]*\\s+[-]*" \ "Available[-]*\\s+[-]*ActiveStates" CPG_PATTERN = "^\\s*Id\\s+[-]*Name[-]*\\s+Warn" VOLUME_PATTERN = "^\\s*Id\\s+Name\\s+Prov\\s+Compr\\s+Dedup" FSTORE_PATTERN = "^\\s*Fstore\\s+VFS\\s+FPG\\s+State\\s+Mode" FSHARE_PATTERN = "^\\s*ShareName\\s+Protocol\\s+VFS\\s+FileStore\\s+" \ "ShareDir\\s+State" VFS_PATTERN = "^\\s*VFS\\s+FPG\\s+IPAddr\\s+State" SRSTATPORT_PATTERN = "^\\s*PORT_N\\s+PORT_S\\s+PORT_P\\s+Rd\\s+Wr\\s+" \ "Tot\\s+Rd\\s+Wr\\s+Tot\\s+Rd\\s+Wr\\s+Tot" SRSTATPD_PATTERN = "^\\s*PDID\\s+Rd\\s+Wr\\s+" \ "Tot\\s+Rd\\s+Wr\\s+Tot\\s+Rd\\s+Wr\\s+Tot" SRSTATVV_PATTERN = "^\\s*VVID\\s+VV_NAME\\s+Rd\\s+Wr\\s+" \ "Tot\\s+Rd\\s+Wr\\s+Tot\\s+Rd\\s+Wr\\s+Tot" IPV4_PATTERN = "^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$" HOST_OR_VV_SET_PATTERN = "^\\s*Id\\s+Name\\s+Members\\s+Comment" HOST_OR_VV_PATTERN = "^\\s*Id\\s+Name\\s+" VLUN_PATTERN = "^\\s*Lun\\s+VVName\\s+HostName" CONTROLLER_STATUS_MAP = { 'OK': constants.ControllerStatus.NORMAL, 'NORMAL': constants.ControllerStatus.NORMAL, 'DEGRADED': constants.ControllerStatus.DEGRADED, 'FAILED': constants.ControllerStatus.FAULT } DISK_PHYSICAL_TYPE_MAP = { 'FC': constants.DiskPhysicalType.FC, 'SSD': constants.DiskPhysicalType.SSD, 'NL': constants.DiskPhysicalType.UNKNOWN } DISK_STATUS_MAP = { 'NORMAL': constants.DiskStatus.NORMAL, 'DEGRADED': constants.DiskStatus.DEGRADED, 'FAILED': constants.DiskStatus.ABNORMAL, 'NEW': constants.DiskStatus.ABNORMAL } PORT_CONNECTION_STATUS_MAP = { 'CONFIG_WAIT': constants.PortConnectionStatus.DISCONNECTED, 'ALPA_WAIT': constants.PortConnectionStatus.DISCONNECTED, 'LOGIN_WAIT': constants.PortConnectionStatus.DISCONNECTED, 'READY': constants.PortConnectionStatus.CONNECTED, 'LOSS_SYNC': constants.PortConnectionStatus.DISCONNECTED, 'ERROR_STATE': constants.PortConnectionStatus.DISCONNECTED, 'XXX': constants.PortConnectionStatus.DISCONNECTED, 'NONPARTICIPATE': constants.PortConnectionStatus.DISCONNECTED, 'COREDUMP': constants.PortConnectionStatus.DISCONNECTED, 'OFFLINE': constants.PortConnectionStatus.DISCONNECTED, 'FWDEAD': constants.PortConnectionStatus.DISCONNECTED, 'IDLE_FOR_RESET': constants.PortConnectionStatus.DISCONNECTED, 'DHCP_IN_PROGRESS': constants.PortConnectionStatus.DISCONNECTED, 'PENDING_RESET': constants.PortConnectionStatus.DISCONNECTED } PORT_TYPE_MAP = { 'FC': constants.PortType.FC, 'ISCSI': constants.PortType.ISCSI, 'ETH': 
constants.PortType.ETH, 'CNA': constants.PortType.CNA, 'SAS': constants.PortType.SAS, 'COMBO': constants.PortType.COMBO, 'NVMe': constants.PortType.OTHER, 'UNKNOWN': constants.PortType.OTHER, 'RCIP': constants.PortType.RCIP, 'RCFC': constants.PortType.OTHER } VERSION_PATTERN = "^\\s*[-]*Service[-]*\\s+[-]*State[-]*\\s+" SSH_NODE_MEM_TYPE = { 1: "control", 2: "data" } SSH_METRIC_TYPE = { 1: "io", 2: "kbytes", 3: "svct", 4: "iosz" } SSH_COLLECT_TIME_PATTERN = "\\(\\d+\\)" COLLECT_INTERVAL_HIRES = 60000 SIXTY_SECONDS = 60 REST_COLLEC_TTIME_PATTERN = '%Y-%m-%dT%H:%M:%SZ' IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Input/output operations per second" } READ_IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Read input/output operations per second" } WRITE_IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Write input/output operations per second" } THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data is " "successfully transferred in MB/s" } READ_THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data read is " "successfully transferred in MB/s" } WRITE_THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data write is " "successfully transferred in MB/s" } RESPONSE_TIME_DESCRIPTION = { "unit": "ms", "description": "Average time taken for an IO " "operation in ms" } CACHE_HIT_RATIO_DESCRIPTION = { "unit": "%", "description": "Percentage of io that are cache hits" } READ_CACHE_HIT_RATIO_DESCRIPTION = { "unit": "%", "description": "Percentage of read ops that are cache hits" } WRITE_CACHE_HIT_RATIO_DESCRIPTION = { "unit": "%", "description": "Percentage of write ops that are cache hits" } IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of IO requests in KB" } READ_IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of read IO requests in KB" } WRITE_IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of write IO requests in KB" } POOL_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION } VOLUME_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, "ioSize": IO_SIZE_DESCRIPTION, "readIoSize": READ_IO_SIZE_DESCRIPTION, "writeIoSize": WRITE_IO_SIZE_DESCRIPTION } PORT_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION } DISK_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION } SECONDS_PER_HOUR = 3600 HOST_OS_MAP = { 'AIX': constants.HostOSTypes.AIX, 'Citrix Xen Server 5.x/6.x': constants.HostOSTypes.XEN_SERVER, 'Citrix Xen Server 7.x': constants.HostOSTypes.XEN_SERVER, 'HP-UX': constants.HostOSTypes.HP_UX, 'HP-UX (11i v1,11i v2)': 
constants.HostOSTypes.HP_UX, 'HP-UX (11i v3)': constants.HostOSTypes.HP_UX, 'OpenVMS': constants.HostOSTypes.OPEN_VMS, 'Oracle VM x86': constants.HostOSTypes.ORACLE_VM, 'Solaris 11': constants.HostOSTypes.SOLARIS, 'Solaris 9/10': constants.HostOSTypes.SOLARIS, 'VMware (ESXi)': constants.HostOSTypes.VMWARE_ESX, 'ESXI6.0': constants.HostOSTypes.VMWARE_ESX, 'ESX 4.x/5.x': constants.HostOSTypes.VMWARE_ESX, 'Windows 2003': constants.HostOSTypes.WINDOWS, 'Windows 2008/2008 R2': constants.HostOSTypes.WINDOWS, 'Windows 2012': constants.HostOSTypes.WINDOWS_SERVER_2012, 'Windows 2012 / WS2012 R2': constants.HostOSTypes.WINDOWS_SERVER_2012, 'Windows Server 2016': constants.HostOSTypes.WINDOWS, 'Red Hat Enterprise Linux': constants.HostOSTypes.LINUX, 'OE Linux UEK (5.x, 6.x)': constants.HostOSTypes.LINUX, 'OE Linux UEK 7.x': constants.HostOSTypes.LINUX, 'RHE Linux (5.x, 6.x)': constants.HostOSTypes.LINUX, 'RHE Linux (Pre RHEL 5)': constants.HostOSTypes.LINUX, 'RHE Linux 7.x': constants.HostOSTypes.LINUX, 'SuSE (10.x, 11.x)': constants.HostOSTypes.LINUX, 'SuSE': constants.HostOSTypes.LINUX, 'SuSE 12.x': constants.HostOSTypes.LINUX, 'SuSE Linux (Pre SLES 10)': constants.HostOSTypes.LINUX, 'SuSE Virtualization': constants.HostOSTypes.LINUX } ================================================ FILE: delfin/drivers/hpe/hpe_3par/hpe_3parstor.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
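# NOTE (editorial sketch): the table-header regexes defined in consts.py
# above (NODE_PATTERN, DISK_PATTERN, PORT_PATTERN, ...) exist to locate the
# title row of each 3PAR CLI table so the column names can be zipped with
# the data rows, which is what SSHHandler.parse_datas_to_list does further
# below. A minimal standalone illustration of that matching logic, using a
# fabricated `shownode` excerpt (column values are made up):
#
#     import re
#
#     NODE_PATTERN = "^\\s*Node\\s+[-]*Name[-]*\\s+[-]*State[-]*\\s+"
#     OUTPUT = ("Node --Name--- -State- Master InCluster\n"
#               "   0 1000110-0 OK      Yes    Yes\n"
#               "   1 1000110-1 OK      No     Yes\n")
#
#     pattern = re.compile(NODE_PATTERN)
#     titles, rows = [], []
#     for line in OUTPUT.split('\n'):
#         line = line.strip()
#         if not line:
#             continue
#         if pattern.search(line):
#             # title row found: normalize names the same way the driver does
#             titles = [t.lower().replace('-', '') for t in line.split()]
#         elif titles and len(line.split()) == len(titles):
#             rows.append(dict(zip(titles, line.split())))
#     # rows -> [{'node': '0', 'name': '1000110-0', 'state': 'OK', ...}, ...]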
import six from oslo_log import log from delfin import context from delfin.common import constants from delfin.drivers import driver from delfin.drivers.hpe.hpe_3par import alert_handler, consts from delfin.drivers.hpe.hpe_3par import component_handler from delfin.drivers.hpe.hpe_3par import rest_handler from delfin.drivers.hpe.hpe_3par import ssh_handler from delfin.drivers.utils.rest_client import RestClient LOG = log.getLogger(__name__) # Hpe3parStor Driver class Hpe3parStorDriver(driver.StorageDriver): """Hpe3parStorDriver implement Hpe 3par Stor driver, """ def __init__(self, **kwargs): super().__init__(**kwargs) self.rest_client = RestClient(**kwargs) self.rest_client.verify = kwargs.get('verify', False) self.rest_handler = rest_handler.RestHandler(self.rest_client) self.rest_handler.login() self.ssh_handler = ssh_handler.SSHHandler(**kwargs) self.version = self.ssh_handler.login(context) self.comhandler = component_handler.ComponentHandler( rest_handler=self.rest_handler, ssh_handler=self.ssh_handler) self.alert_handler = alert_handler.AlertHandler( rest_handler=self.rest_handler, ssh_handler=self.ssh_handler) def reset_connection(self, context, **kwargs): try: self.rest_handler.logout() except Exception as e: LOG.warning('logout failed when resetting connection, ' 'reason is %s' % six.text_type(e)) self.rest_client.verify = kwargs.get('verify', False) self.rest_handler.login() def close_connection(self): self.rest_handler.logout() def get_storage(self, context): return self.comhandler.get_storage(context) def list_storage_pools(self, context): self.comhandler.set_storage_id(self.storage_id) return self.comhandler.list_storage_pools(context) def list_volumes(self, context): self.comhandler.set_storage_id(self.storage_id) return self.comhandler.list_volumes(context) def list_controllers(self, context): return self.comhandler.list_controllers(self.storage_id) def list_ports(self, context): return self.comhandler.list_ports(self.storage_id) def list_disks(self, context): return self.comhandler.list_disks(self.storage_id) def list_alerts(self, context, query_para=None): return self.alert_handler.list_alerts(context, query_para) def add_trap_config(self, context, trap_config): pass def remove_trap_config(self, context, trap_config): pass @staticmethod def parse_alert(context, alert): return alert_handler.AlertHandler().parse_alert(context, alert) def clear_alert(self, context, alert): return self.alert_handler.clear_alert(context, alert) def list_storage_host_initiators(self, context): return self.comhandler.list_storage_host_initiators(self.storage_id) def list_storage_hosts(self, context): return self.comhandler.list_storage_hosts(self.storage_id) def collect_perf_metrics(self, context, storage_id, resource_metrics, start_time, end_time): return self.comhandler.collect_perf_metrics(storage_id, resource_metrics, start_time, end_time) @staticmethod def get_capabilities(context, filters=None): """Get capability of supported driver""" return { 'is_historic': True, 'resource_metrics': { constants.ResourceType.STORAGE_POOL: consts.POOL_CAP, constants.ResourceType.VOLUME: consts.VOLUME_CAP, constants.ResourceType.PORT: consts.PORT_CAP, constants.ResourceType.DISK: consts.DISK_CAP } } def get_latest_perf_timestamp(self, context): return self.comhandler.get_latest_perf_timestamp() def list_storage_host_groups(self, context): return self.comhandler.list_storage_host_groups(self.storage_id) def list_port_groups(self, context): return self.comhandler.list_port_groups(self.storage_id) def 
list_volume_groups(self, context): return self.comhandler.list_volume_groups(self.storage_id) def list_masking_views(self, context): return self.comhandler.list_masking_views(self.storage_id) ================================================ FILE: delfin/drivers/hpe/hpe_3par/rest_handler.py ================================================ # Copyright 2020 The SODA Authors. # Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading import six from oslo_log import log as logging from delfin import cryptor from delfin import exception from delfin.drivers.hpe.hpe_3par import consts from delfin.drivers.utils.tools import Tools LOG = logging.getLogger(__name__) class RestHandler(object): """Common class for Hpe 3parStor storage system.""" REST_AUTH_URL = '/api/v1/credentials' REST_LOGOUT_URL = '/api/v1/credentials/' REST_STORAGE_URL = '/api/v1/system' REST_CAPACITY_URL = '/api/v1/capacity' REST_POOLS_URL = '/api/v1/cpgs' REST_VOLUMES_URL = '/api/v1/volumes' REST_ALERTS_URL = '/api/v1/eventlog?query="category EQ 2"' REST_HOSTS_URL = '/api/v1/hosts' REST_AUTH_KEY = 'X-HP3PAR-WSAPI-SessionKey' REST_CPGSTATISTICS_URL = '/api/v1/systemreporter' \ '/attime/cpgstatistics/hires?' \ 'query="sampleTime GE %s AND sampleTime LE %s"' session_lock = None def __init__(self, rest_client): self.rest_client = rest_client self.session_lock = threading.Lock() def call(self, url, data=None, method=None): """Send requests to server. If fail, try another RestURL. Increase the judgment of token invalidation """ try: res = self.call_with_token(url, data, method, calltimeout=consts.SOCKET_TIMEOUT) # Judge whether the access failure is caused by # the token invalidation. # If the token fails, it will be retrieved again, # and the token will be accessed again if res is not None: # 403 The client request has an invalid session key. # The request came from a different IP address # 409 Session key is being used. 
if (res.status_code == consts.ERROR_SESSION_INVALID_CODE or res.status_code == consts.ERROR_SESSION_IS_BEING_USED_CODE): LOG.error( "Invalid session token: status={0}, response={1}".format(res.status_code, res.text)) LOG.error("Failed to get token, re-login to get a new token") # if the method is logout, return immediately if method == 'DELETE' and RestHandler.\ REST_LOGOUT_URL in url: return res self.rest_client.rest_auth_token = None access_session = self.login() # if a new token was obtained, revisit the url if access_session is not None: res = self.call_with_token( url, data, method, calltimeout=consts.SOCKET_TIMEOUT) else: LOG.error('Login res is None') elif res.status_code == 503: raise exception.InvalidResults(res.text) else: LOG.error('Rest exec failed') return res except exception.DelfinException as e: err_msg = "Call failed: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as e: err_msg = "RestHandler.call failed: %s" % (six.text_type(e)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def get_resinfo_call(self, url, data=None, method=None): rejson = None res = self.call(url, data, method) if res is not None: if res.status_code == consts.SUCCESS_STATUS_CODES: rejson = res.json() else: if res.text and 'unsupported' in res.text: LOG.warning('rest api error: {}'.format(res.text)) else: raise exception.StorageBackendException(res.text) return rejson def login(self): """Login Hpe3par storage array.""" try: access_session = self.rest_client.rest_auth_token if self.rest_client.san_address: url = RestHandler.REST_AUTH_URL data = {"user": self.rest_client.rest_username, "password": cryptor.decode( self.rest_client.rest_password) } self.session_lock.acquire() if self.rest_client.rest_auth_token is not None: return self.rest_client.rest_auth_token self.rest_client.init_http_head() res = self.rest_client. \ do_call(url, data, 'POST', calltimeout=consts.SOCKET_TIMEOUT) if res is None: LOG.error('Login res is None') raise exception.InvalidResults('res is None') if res.status_code == consts. \ LOGIN_SUCCESS_STATUS_CODES: result = res.json() access_session = result.get('key') self.rest_client.rest_auth_token = cryptor.encode( access_session) self.rest_client.session.headers[ RestHandler.REST_AUTH_KEY] = cryptor.encode( access_session) else: LOG.error("Login error. 
URL: %(url)s\n" "Reason: %(reason)s.", {"url": url, "reason": res.text}) if 'invalid username or password' in res.text: raise exception.InvalidUsernameOrPassword() else: raise exception.StorageBackendException( six.text_type(res.text)) else: LOG.error('Login Parameter error') return access_session except Exception as e: LOG.error("Login error: %s", six.text_type(e)) raise e finally: self.session_lock.release() def logout(self): """Logout the session.""" try: url = RestHandler.REST_LOGOUT_URL if self.rest_client.rest_auth_token is not None: url = '%s%s' % ( url, cryptor.decode(self.rest_client.rest_auth_token)) self.rest_client.rest_auth_token = None if self.rest_client.san_address: self.call(url, method='DELETE') if self.rest_client.session: self.rest_client.session.close() except exception.DelfinException as e: err_msg = "Logout error: %s" % (e.msg) LOG.error(err_msg) raise e except Exception as e: err_msg = "Logout error: %s" % (six.text_type(e)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def call_with_token(self, url, data=None, method='GET', calltimeout=consts.SOCKET_TIMEOUT): with self.session_lock: auth_key = None if self.rest_client.session: auth_key = self.rest_client.session.headers.get( RestHandler.REST_AUTH_KEY, None) if auth_key: self.rest_client.session.headers[ RestHandler.REST_AUTH_KEY] = cryptor.decode(auth_key) res = self.rest_client.do_call(url, data, method, calltimeout) if auth_key: self.rest_client.session.headers[ RestHandler.REST_AUTH_KEY] = auth_key return res def get_storage(self): rejson = self.get_resinfo_call(RestHandler.REST_STORAGE_URL, method='GET') return rejson def get_capacity(self): rejson = self.get_resinfo_call(RestHandler.REST_CAPACITY_URL, method='GET') return rejson def get_all_pools(self): rejson = self.get_resinfo_call(RestHandler.REST_POOLS_URL, method='GET') return rejson def get_all_volumes(self): rejson = self.get_resinfo_call(RestHandler.REST_VOLUMES_URL, method='GET') return rejson def get_pool_metrics(self, start_time, end_time): start_time_str = Tools.timestamp_to_utc_time_str( start_time, consts.REST_COLLEC_TTIME_PATTERN) end_time_str = Tools.timestamp_to_utc_time_str( end_time, consts.REST_COLLEC_TTIME_PATTERN) url = RestHandler.REST_CPGSTATISTICS_URL % ( start_time_str, end_time_str) rejson = self.get_resinfo_call(url, method='GET') return rejson def list_storage_host(self): rejson = self.get_resinfo_call(RestHandler.REST_HOSTS_URL, method='GET') return rejson ================================================ FILE: delfin/drivers/hpe/hpe_3par/ssh_handler.py ================================================ # Copyright 2020 The SODA Authors. # Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re import time import six from oslo_log import log as logging from oslo_utils import units from delfin import exception from delfin import utils from delfin.drivers.hpe.hpe_3par import consts from delfin.drivers.utils.ssh_client import SSHPool from delfin.drivers.utils.tools import Tools LOG = logging.getLogger(__name__) class SSHHandler(object): """Common class for Hpe 3parStor storage system.""" HPE3PAR_COMMAND_SHOWWSAPI = 'showwsapi' HPE3PAR_COMMAND_CHECKHEALTH = 'checkhealth vv vlun task snmp ' \ 'port pd node network ld dar cage cabling' HPE3PAR_COMMAND_SHOWALERT = 'showalert -d' HPE3PAR_COMMAND_REMOVEALERT = 'removealert -f %s' ALERT_NOT_EXIST_MSG = 'Unable to read alert' HPE3PAR_COMMAND_SHOWNODE = 'shownode' HPE3PAR_COMMAND_SHOWNODE_CPU = 'shownode -cpu' HPE3PAR_COMMAND_SHOWEEPROM = 'showeeprom' HPE3PAR_COMMAND_SHOWPD = 'showpd' HPE3PAR_COMMAND_SHOWPD_I = 'showpd -i' HPE3PAR_COMMAND_SHOWPORT = 'showport' HPE3PAR_COMMAND_SHOWPORT_I = 'showport -i' HPE3PAR_COMMAND_SHOWPORT_PAR = 'showport -par' HPE3PAR_COMMAND_SHOWPORT_C = 'showport -c' HPE3PAR_COMMAND_SHOWPORT_ISCSI = 'showport -iscsi' HPE3PAR_COMMAND_SHOWPORT_RCIP = 'showport -rcip' HPE3PAR_COMMAND_SHOWPORT_FCOE = 'showport -fcoe' HPE3PAR_COMMAND_SHOWPORT_FS = 'showport -fs' HPE3PAR_COMMAND_SHOWHOSTSET_D = 'showhostset -d' HPE3PAR_COMMAND_SHOWVVSET_D = 'showvvset -d' HPE3PAR_COMMAND_SHOWHOST_D = 'showhost -d' HPE3PAR_COMMAND_SHOWVV = 'showvv' HPE3PAR_COMMAND_SHOWVLUN_T = 'showvlun -t' HPE3PAR_COMMAND_SRSTATPORT = 'srstatport -attime -groupby ' \ 'PORT_N,PORT_S,PORT_P -btsecs %d -etsecs %d' HPE3PAR_COMMAND_SRSTATPD = 'srstatpd -attime -btsecs %d -etsecs %d' HPE3PAR_COMMAND_SRSTATVV = 'srstatvv -attime -groupby VVID,VV_NAME' \ ' -btsecs %d -etsecs %d' HPE3PAR_COMMAND_SRSTATPD_ATTIME = 'srstatpd -attime' def __init__(self, **kwargs): self.kwargs = kwargs self.ssh_pool = SSHPool(**kwargs) def login(self, context): """Test SSH connection """ version = '' try: re = self.exec_command(SSHHandler.HPE3PAR_COMMAND_SHOWWSAPI) if re: version = self.get_version(re) except Exception as e: LOG.error("Login error: %s", six.text_type(e)) raise e return version def get_version(self, wsapi_infos): """get wsapi version """ version = '' try: version_list = self.parse_datas_to_list(wsapi_infos, consts.VERSION_PATTERN) if version_list and version_list[0]: version = version_list[0].get('version') except Exception as e: LOG.error("Get version error: %s, wsapi info: %s" % ( six.text_type(e), wsapi_infos)) return version def get_health_state(self): """Check the hardware and software health status of the storage system return: System is healthy """ return self.exec_command(SSHHandler.HPE3PAR_COMMAND_CHECKHEALTH) def get_all_alerts(self): return self.exec_command(SSHHandler.HPE3PAR_COMMAND_SHOWALERT) def remove_alerts(self, alert_id): """Clear alert from storage system. 
Uses the CLI command: removealert -f <alert_id>. """ utils.check_ssh_injection([alert_id]) command_str = SSHHandler.HPE3PAR_COMMAND_REMOVEALERT % alert_id res = self.exec_command(command_str) if res: if self.ALERT_NOT_EXIST_MSG not in res: raise exception.InvalidResults(six.text_type(res)) LOG.warning("Alert %s doesn't exist.", alert_id) def get_controllers(self): para_map = { 'command': 'parse_node_table' } return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWNODE, self.parse_datas_to_list, pattern_str=consts.NODE_PATTERN, para_map=para_map) def get_controllers_cpu(self): para_map = { 'command': 'parse_node_cpu' } return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWNODE_CPU, self.parse_datas_to_map, pattern_str=consts.CPU_PATTERN, para_map=para_map, throw_excep=False) def get_controllers_version(self): return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWEEPROM, self.parse_node_version, throw_excep=False) def parse_node_version(self, resource_info, pattern_str, para_map=None): node_version_map = {} node_info_map = {} try: obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: if str_line.startswith('Node:'): str_info = self.split_str_by_colon(str_line) node_info_map['node_id'] = str_info[1] if str_line.startswith('OS version:'): str_info = self.split_str_by_colon(str_line) node_info_map['node_os_version'] = str_info[1] else: if node_info_map: node_version_map[ node_info_map.get('node_id')] = node_info_map.get( 'node_os_version') node_info_map = {} except Exception as e: err_msg = "Analyse node version info error: %s" % six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return node_version_map def split_str_by_colon(self, str_line): str_info = [] if str_line: # str_info[0] is the parsed attribute name, there are some special # characters such as spaces, brackets, etc., # str_info[1] is the value str_info = str_line.split(':', 1) str_info[0] = str_info[0].strip() str_info[0] = str_info[0].replace(" ", "_") \ .replace("(", "").replace(")", "").lower() if len(str_info) > 1: str_info[1] = str_info[1].strip() return str_info def get_disks(self): return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPD, self.parse_datas_to_list, pattern_str=consts.DISK_PATTERN) def get_disks_inventory(self): inventory_map = {} para_map = { 'command': 'parse_disk_table' } inventorys = self.get_resources_info( SSHHandler.HPE3PAR_COMMAND_SHOWPD_I, self.parse_datas_to_list, pattern_str=consts.DISK_I_PATTERN, para_map=para_map, throw_excep=False) for inventory in (inventorys or []): inventory_map[inventory.get('disk_id')] = inventory return inventory_map def get_ports(self): return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT, self.parse_datas_to_list, pattern_str=consts.PORT_PATTERN) def get_ports_inventory(self): para_map = { 'key_position': 0, 'value_position': 'last' } return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT_I, self.parse_datas_to_map, pattern_str=consts.PORT_I_PATTERN, para_map=para_map, throw_excep=False) def get_ports_config(self): para_map = { 'key_position': 0, 'value_position': 4 } return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT_PAR, self.parse_datas_to_map, pattern_str=consts.PORT_PER_PATTERN, para_map=para_map, throw_excep=False) def get_ports_iscsi(self): iscsis_map = {} iscsis = self.get_resources_info( SSHHandler.HPE3PAR_COMMAND_SHOWPORT_ISCSI, self.parse_datas_to_list, pattern_str=consts.PORT_ISCSI_PATTERN, throw_excep=False) 
for iscsi in (iscsis or []): iscsis_map[iscsi.get('n:s:p')] = iscsi return iscsis_map def get_ports_connected(self): para_map = { 'key_position': 0, 'value_position': 6 } return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWPORT_C, self.parse_datas_to_map, pattern_str=consts.PORT_C_PATTERN, para_map=para_map, throw_excep=False) def get_ports_rcip(self): rcip_map = {} rcips = self.get_resources_info( SSHHandler.HPE3PAR_COMMAND_SHOWPORT_RCIP, self.parse_datas_to_list, pattern_str=consts.PORT_RCIP_PATTERN, throw_excep=False) for rcip in (rcips or []): rcip_map[rcip.get('n:s:p')] = rcip return rcip_map def get_ports_fs(self): port_fs_map = {} port_fss = self.get_resources_info( SSHHandler.HPE3PAR_COMMAND_SHOWPORT_FS, self.parse_datas_to_list, pattern_str=consts.PORT_FS_PATTERN, throw_excep=False) for port_fs in (port_fss or []): port_fs_map[port_fs.get('n:s:p')] = port_fs return port_fs_map def get_ports_fcoe(self): fcoe_map = {} fcoes = self.get_resources_info( SSHHandler.HPE3PAR_COMMAND_SHOWPORT_FCOE, self.parse_datas_to_list, pattern_str=consts.PORT_FCOE_PATTERN, throw_excep=False) for fcoe in (fcoes or []): fcoe_map[fcoe.get('n:s:p')] = fcoe return fcoe_map def parse_datas_to_list(self, resource_info, pattern_str, para_map=None): obj_list = [] titles_size = 9999 try: pattern = re.compile(pattern_str) obj_infos = resource_info.split('\n') titles = [] for obj_info in obj_infos: str_line = obj_info.strip() if str_line: search_obj = pattern.search(str_line) if search_obj: titles = str_line.split() titles_size = len(titles) else: str_info = str_line.split() cols_size = len(str_info) if para_map and para_map.get('command', '') \ == 'parse_disk_table': obj_list = self.parse_disk_table(cols_size, titles_size, str_info, obj_list, titles) elif para_map and para_map.get('command', '') \ == 'parse_node_table': obj_list = self.parse_node_table(cols_size, titles_size, str_info, obj_list, titles) elif para_map and para_map.get('command', '') \ == 'parse_metric_table': if '---------------------------------' in str_line: break if 'Time:' in str_line: collect_time = Tools.get_numbers_in_brackets( str_line, consts.SSH_COLLECT_TIME_PATTERN) if collect_time: collect_time = int(collect_time) * units.k else: collect_time = int(time.time() * units.k) para_map['collect_time'] = collect_time obj_list = self.parse_metric_table(cols_size, titles_size, str_info, obj_list, titles, para_map) elif para_map and para_map.get('command', '') \ == 'parse_set_groups_table': if '---------------------------------' in str_line: break obj_list = self.parse_set_groups_table(cols_size, titles_size, str_info, obj_list) elif para_map and para_map.get('command', '') \ == 'parse_view_table': if '---------------------------------' in str_line: break obj_list = self.parse_view_table(cols_size, titles_size, str_info, obj_list, titles) else: if cols_size == titles_size: obj_model = {} for i in range(0, cols_size): key = titles[i].lower().replace('-', '') obj_model[key] = str_info[i] if obj_model: obj_list.append(obj_model) except Exception as e: err_msg = "Analyse data to list error: %s" % six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_list def parse_datas_to_map(self, resource_info, pattern_str, para_map=None): obj_model = {} titles = [] titles_size = 9999 try: pattern = re.compile(pattern_str) obj_infos = resource_info.split('\n') for obj_info in obj_infos: str_line = obj_info.strip() if str_line: search_obj = pattern.search(str_line) if search_obj: titles = str_line.split() titles_size = 
len(titles) else: str_info = str_line.split() cols_size = len(str_info) if para_map and para_map.get('command', '') == 'parse_node_cpu': obj_model = self.parse_node_cpu(cols_size, titles_size, str_info, obj_model, titles) else: if cols_size >= titles_size: key_position = para_map.get('key_position') value_position = para_map.get('value_position') if para_map.get('value_position') == 'last': value_position = cols_size - 1 obj_model[str_info[key_position]] = str_info[ value_position] except Exception as e: err_msg = "Analyse data to map error: %s" % six.text_type(e) LOG.error(err_msg) raise exception.InvalidResults(err_msg) return obj_model def parse_disk_table(self, cols_size, titles_size, str_info, obj_list, titles): if cols_size >= titles_size: fw_rev_index = self.get_index_of_key(titles, 'FW_Rev') if fw_rev_index: inventory_map = { 'disk_id': str_info[0], 'disk_mfr': ' '.join(str_info[4:fw_rev_index - 2]), 'disk_model': str_info[fw_rev_index - 2], 'disk_serial': str_info[fw_rev_index - 1], 'disk_fw_rev': str_info[fw_rev_index] } obj_list.append(inventory_map) return obj_list def parse_node_table(self, cols_size, titles_size, str_info, obj_list, titles): if cols_size >= titles_size: obj_model = {} num_prefix = 1 for i in range(cols_size): key_prefix = '' key = titles[i].lower().replace('-', '') if key == 'mem(mb)': key_prefix = consts.SSH_NODE_MEM_TYPE.get(num_prefix) num_prefix += 1 key = '%s%s' % (key_prefix, key) obj_model[key] = str_info[i] if obj_model: obj_list.append(obj_model) return obj_list def parse_node_cpu(self, cols_size, titles_size, str_info, obj_map, titles): if cols_size >= titles_size: if 'Cores' in titles: node_id = str_info[0] cpu_info = ' '.join(str_info[5:]) cpu_map = obj_map.setdefault(node_id, {}) cpu_map[cpu_info] = int(str_info[2]) else: node_id = str_info[0] cpu_info = str_info[4] cpu_map = obj_map.setdefault(node_id, {}) cpu_map[cpu_info] = cpu_map.get(cpu_info, 0) + 1 return obj_map def parse_metric_table(self, cols_size, titles_size, str_info, obj_list, titles, para_map): if cols_size == titles_size: obj_model = {} metric_type_num = 1 key_prefix = '' for i in range(0, cols_size): key = titles[i].lower().replace('-', '') if key == 'rd': key_prefix = consts.SSH_METRIC_TYPE.get(metric_type_num) metric_type_num += 1 key = '%s%s' % (key_prefix, key) obj_model[key] = str_info[i] if obj_model: if para_map and para_map.get('collect_time'): obj_model['collect_time'] = para_map.get('collect_time') obj_list.append(obj_model) return obj_list def get_index_of_key(self, titles_list, key): if titles_list: for title in titles_list: if key in title: return titles_list.index(title) return None def get_resources_info(self, command, parse_type, pattern_str=None, para_map=None, throw_excep=True): re = self.exec_command(command) resources_info = None try: if re: resources_info = parse_type(re, pattern_str, para_map=para_map) except Exception as e: LOG.error("Get %s info error: %s" % (command, six.text_type(e))) if throw_excep: raise e return resources_info def exec_command(self, command): re = self.ssh_pool.do_exec(command) if re: if 'invalid command name' in re or 'Invalid option' in re: LOG.warning(re) raise NotImplementedError(re) elif 'Too many local CLI connections' in re: LOG.error("command %s failed: %s" % (command, re)) raise exception.StorageBackendException(re) return re def get_volumes(self): return self.get_resources_info(SSHHandler.HPE3PAR_COMMAND_SHOWVV, self.parse_datas_to_list, pattern_str=consts.VOLUME_PATTERN) def get_port_metrics(self, start_time, end_time): 
command = SSHHandler.HPE3PAR_COMMAND_SRSTATPORT % ( int(start_time / units.k), int(end_time / units.k)) return self.get_resources_info(command, self.parse_datas_to_list, pattern_str=consts.SRSTATPORT_PATTERN, para_map={ 'command': 'parse_metric_table'}) def get_disk_metrics(self, start_time, end_time): command = SSHHandler.HPE3PAR_COMMAND_SRSTATPD_ATTIME if start_time and end_time: command = SSHHandler.HPE3PAR_COMMAND_SRSTATPD % ( int(start_time / units.k), int(end_time / units.k)) return self.get_resources_info(command, self.parse_datas_to_list, pattern_str=consts.SRSTATPD_PATTERN, para_map={ 'command': 'parse_metric_table'}) def get_volume_metrics(self, start_time, end_time): command = SSHHandler.HPE3PAR_COMMAND_SRSTATVV % ( int(start_time / units.k), int(end_time / units.k)) return self.get_resources_info(command, self.parse_datas_to_list, pattern_str=consts.SRSTATVV_PATTERN, para_map={ 'command': 'parse_metric_table'}) def list_storage_host_groups(self): para_map = { 'command': 'parse_set_groups_table' } return self.get_resources_info( SSHHandler.HPE3PAR_COMMAND_SHOWHOSTSET_D, self.parse_datas_to_list, pattern_str=consts.HOST_OR_VV_SET_PATTERN, para_map=para_map) def list_volume_groups(self): para_map = { 'command': 'parse_set_groups_table' } return self.get_resources_info( SSHHandler.HPE3PAR_COMMAND_SHOWVVSET_D, self.parse_datas_to_list, pattern_str=consts.HOST_OR_VV_SET_PATTERN, para_map=para_map) def parse_set_groups_table(self, cols_size, titles_size, str_info, obj_list): if cols_size >= titles_size: members = [] value = str_info[2].replace('-', '') if value: members = [str_info[2]] obj_model = { 'id': str_info[0], 'name': str_info[1], 'members': members, 'comment': (" ".join(str_info[3:])).replace('-', ''), } obj_list.append(obj_model) elif obj_list and cols_size == 1: value = str_info[0].replace('-', '') if value: obj_model = obj_list[-1] if obj_model and obj_model.get('members'): obj_model.get('members').append(str_info[0]) else: members = [str_info[0]] obj_model['members'] = members return obj_list def parse_view_table(self, cols_size, titles_size, str_info, obj_list, titles): if cols_size >= titles_size: obj_model = {} for i in range(titles_size): key = titles[i].lower().replace('-', '') obj_model[key] = str_info[i] if obj_model: obj_list.append(obj_model) return obj_list def get_resources_ids(self, command, pattern_str, para_map=None): if not para_map: para_map = { 'key_position': 1, 'value_position': 0 } return self.get_resources_info(command, self.parse_datas_to_map, pattern_str=pattern_str, para_map=para_map, throw_excep=False) def list_storage_host_initiators(self): return self.get_resources_info( SSHHandler.HPE3PAR_COMMAND_SHOWHOST_D, self.parse_datas_to_list, pattern_str=consts.HOST_OR_VV_PATTERN) def list_masking_views(self): para_map = { 'command': 'parse_view_table' } return self.get_resources_info( SSHHandler.HPE3PAR_COMMAND_SHOWVLUN_T, self.parse_datas_to_list, pattern_str=consts.VLUN_PATTERN, para_map=para_map) ================================================ FILE: delfin/drivers/hpe/hpe_msa/__init__.py ================================================ ================================================ FILE: delfin/drivers/hpe/hpe_msa/consts.py ================================================ from delfin.common import constants class AlertOIDNumber(object): OID_ERR_ID = '1.3.6.1.3.94.1.11.1.1' OID_EVENT_TYPE = '1.3.6.1.3.94.1.11.1.7' OID_LAST_TIME = '1.3.6.1.3.94.1.11.1.4' OID_EVENT_DESC = '1.3.6.1.3.94.1.11.1.9' OID_EVENT_ID = '1.3.6.1.3.94.1.11.1.3' OID_SEVERITY = 
'1.3.6.1.3.94.1.11.1.6' class StorageVendor(object): HPE_MSA_VENDOR = "HPE" class TrapSeverity(object): TRAP_SEVERITY_MAP = { '1': 'unknown', '2': 'emergency', '3': 'alert', '4': constants.Severity.CRITICAL, '5': 'error', '6': constants.Severity.WARNING, '7': 'notify', '8': constants.Severity.INFORMATIONAL, '9': 'debug', '10': 'mark' } SEVERITY_MAP = { "warning": "Warning", "informational": "Informational", "error": "Major" } class SecondsNumber(object): SECONDS_TO_MS = 1000 class RpmSpeed(object): RPM_SPEED = 1000 class DiskPhysicalType(object): DISK_PHYSICAL_TYPE = { 'fc': constants.DiskPhysicalType.FC, 'SAS': constants.DiskPhysicalType.SAS } class InitiatorType(object): ISCSI_INITIATOR_TYPE = "9" FC_INITIATOR_TYPE = "6" ISCSI_INITIATOR_DESCRIPTION = constants.InitiatorType.ISCSI FC_INITIATOR_DESCRIPTION = constants.InitiatorType.FC UNKNOWN_INITIATOR_DESCRIPTION = constants.InitiatorType.UNKNOWN class HostOSTypes(object): MSA_HOST_TYPE = { 'HP-UX': constants.HostOSTypes.HP_UX } ================================================ FILE: delfin/drivers/hpe/hpe_msa/hpe_msastor.py ================================================ from delfin.drivers import driver from delfin.drivers.hpe.hpe_msa import ssh_handler from delfin.drivers.hpe.hpe_msa.ssh_handler import SSHHandler class HpeMsaStorDriver(driver.StorageDriver): def __init__(self, **kwargs): super().__init__(**kwargs) self.ssh_handler = ssh_handler.SSHHandler(**kwargs) def reset_connection(self, context, **kwargs): self.ssh_handler.login() def get_storage(self, context): return self.ssh_handler.get_storage(self.storage_id) def list_storage_pools(self, context): return self.ssh_handler.list_storage_pools(self.storage_id) def list_volumes(self, context): return self.ssh_handler.list_storage_volume(self.storage_id) def list_controllers(self, context): return self.ssh_handler.\ list_storage_controller(self.storage_id) def list_ports(self, context): return self.ssh_handler.list_storage_ports(self.storage_id) def list_disks(self, context): return self.ssh_handler.list_storage_disks(self.storage_id) def list_alerts(self, context, query_para=None): return self.ssh_handler.list_alerts(query_para) def add_trap_config(self, context, trap_config): pass def remove_trap_config(self, context, trap_config): pass @staticmethod def parse_alert(context, alert): return SSHHandler.parse_alert(alert) def clear_alert(self, context, alert): pass def list_storage_host_initiators(self, context): return self.ssh_handler.list_storage_host_initiators(self.storage_id) def list_storage_hosts(self, context): return self.ssh_handler.list_storage_hosts(self.storage_id) def list_storage_host_groups(self, context): return self.ssh_handler.list_storage_host_groups(self.storage_id) def list_port_groups(self, context): return self.ssh_handler.list_port_groups(self.storage_id) def list_volume_groups(self, context): return self.ssh_handler.list_volume_groups(self.storage_id) def list_masking_views(self, context): return self.ssh_handler.list_masking_views(self.storage_id) @staticmethod def get_access_url(): return 'https://{ip}' ================================================ FILE: delfin/drivers/hpe/hpe_msa/ssh_handler.py ================================================ import hashlib import time import six from oslo_log import log as logging from operator import itemgetter from itertools import groupby from delfin import exception from delfin.common import constants, alert_util from delfin.drivers.utils.ssh_client import SSHPool from delfin.drivers.utils.tools import Tools 
from delfin.drivers.hpe.hpe_msa import consts try: import defusedxml.cElementTree as Et except ImportError: import defusedxml.ElementTree as Et LOG = logging.getLogger(__name__) class SSHHandler(object): def __init__(self, **kwargs): self.ssh_pool = SSHPool(**kwargs) def login(self): try: self.ssh_pool.do_exec('show pools') except Exception as e: LOG.error("Failed to login to msa: %s" % (six.text_type(e))) raise e def get_storage(self, storage_id): try: system_info = self.ssh_pool.do_exec('show system') system_data = self.handle_xml_to_dict(system_info, 'system') version_info = self.ssh_pool.do_exec('show version') version_arr = self.handle_xml_to_json(version_info, 'versions') version_id = "" if version_arr: version_id = version_arr[0].get('bundle-version') if system_data: pools_list = self.list_storage_pools(storage_id) total_capacity = 0 if pools_list: for pool in pools_list: total_capacity += int(pool.get('total_capacity')) disks_list = self.list_storage_disks(storage_id) raw_capacity = 0 if disks_list: for disk in disks_list: raw_capacity += int(disk.get('capacity')) volumes_list = self.list_storage_volume(storage_id) volume_all_size = 0 if volumes_list: for volume in volumes_list: volume_all_size += int(volume.get('total_capacity')) health = system_data.get('health') status = constants.StorageStatus.OFFLINE if health == 'OK': status = constants.StorageStatus.NORMAL elif health == 'Degraded': status = constants.StorageStatus.DEGRADED serial_num = system_data.get('midplane-serial-number') storage_map = { 'name': system_data.get('system-name'), 'vendor': consts.StorageVendor.HPE_MSA_VENDOR, 'model': system_data.get('product-id'), 'status': status, 'serial_number': serial_num, 'firmware_version': version_id, 'location': system_data.get('system-location'), 'raw_capacity': int(raw_capacity), 'total_capacity': int(total_capacity), 'used_capacity': int(volume_all_size), 'free_capacity': int(total_capacity - volume_all_size) } return storage_map except Exception as e: err_msg = "Failed to get system info: %s" % (six.text_type(e)) LOG.error(err_msg) raise e def list_storage_disks(self, storage_id): try: disk_info = self.ssh_pool.do_exec('show disks') disk_detail = self.handle_xml_to_json(disk_info, 'drives') disks_arr = [] for data in disk_detail: health = data.get('health') status = constants.StoragePoolStatus.OFFLINE if health == 'OK': status = constants.StoragePoolStatus.NORMAL size = self.parse_string_to_bytes(data.get('size')) physical_type = consts.DiskPhysicalType.\ DISK_PHYSICAL_TYPE.get(data.get('description'), constants.DiskPhysicalType.
UNKNOWN) rpm = data.get('rpm') if rpm: rpm = int(rpm) * consts.RpmSpeed.RPM_SPEED data_map = { 'native_disk_id': data.get('location'), 'name': data.get('location'), 'physical_type': physical_type, 'status': status, 'storage_id': storage_id, 'native_disk_group_id': data.get('disk-group'), 'serial_number': data.get('serial-number'), 'manufacturer': data.get('vendor'), 'model': data.get('model'), 'speed': rpm, 'capacity': int(size), 'health_score': status } disks_arr.append(data_map) return disks_arr except Exception as e: err_msg = "Failed to get storage disk: %s" % (six.text_type(e)) LOG.error(err_msg) raise e def list_storage_ports(self, storage_id): try: ports_info = self.ssh_pool.do_exec('show ports') ports_split = ports_info.split('\n') ports_array = ports_split[1:len(ports_split) - 1] ports_xml_data = ''.join(ports_array) xml_element = Et.fromstring(ports_xml_data) ports_json = [] for element_data in xml_element.iter('OBJECT'): property_name = element_data.get('basetype') if property_name != 'status': msg = {} for child in element_data.iter('PROPERTY'): msg[child.get('name')] = child.text ports_json.append(msg) ports_elements_info = [] for i in range(0, len(ports_json) - 1, 2): port_element = ports_json[i].copy() port_element.update(ports_json[i + 1]) ports_elements_info.append(port_element) list_ports = [] for data in ports_elements_info: status = constants.PortHealthStatus.NORMAL conn_status = constants.PortConnectionStatus.CONNECTED if data.get('health') != 'OK': status = constants.PortHealthStatus.ABNORMAL conn_status = constants.PortConnectionStatus.\ DISCONNECTED wwn = None port_type = constants.PortType.FC location_port_type = data.get('port-type') if location_port_type: location_port_type = location_port_type.upper() if location_port_type == 'ISCSI': port_type = constants.PortType.ETH else: target_id = data.get('target-id') if target_id: wwn = target_id location = '%s_%s' % (data.get('port'), location_port_type) speed = data.get('configured-speed', None) max_speed = 0 if speed != 'Auto' and speed is not None: max_speed = self.parse_string_to_bytes(speed) data_map = { 'native_port_id': data.get('durable-id'), 'name': data.get('port'), 'type': port_type, 'connection_status': conn_status, 'health_status': status, 'location': location, 'storage_id': storage_id, 'speed': max_speed, 'max_speed': max_speed, 'mac_address': data.get('mac-address'), 'ipv4': data.get('ip-address'), 'wwn': wwn } list_ports.append(data_map) return list_ports except Exception as e: err_msg = "Failed to get storage ports: %s" % (six.text_type(e)) LOG.error(err_msg) raise e def list_storage_controller(self, storage_id): try: controller_info = self.ssh_pool.do_exec('show controllers') controller_detail = self.handle_xml_to_json( controller_info, 'controllers') controller_arr = [] for data in controller_detail: health = data.get('health') status = constants.StoragePoolStatus.OFFLINE if health == 'OK': status = constants.StoragePoolStatus.NORMAL cpu_info = data.get('sc-cpu-type') cpu_count = None if cpu_info: cpu_count = 1 system_memory_size = 0 memory_size = data.get('system-memory-size') if memory_size is not None: memory_size += "MB" system_memory_size = self.parse_string_to_bytes( memory_size) data_map = { 'native_controller_id': data.get('controller-id'), 'name': data.get('durable-id'), 'storage_id': storage_id, 'status': status, 'location': data.get('position'), 'soft_version': data.get('sc-fw'), 'cpu_info': cpu_info, 'cpu_count': cpu_count, 'memory_size': int(system_memory_size) } controller_arr.append(data_map) return
controller_arr except Exception as e: err_msg = "Failed to get storage controllers: %s"\ % (six.text_type(e)) LOG.error(err_msg) raise e def list_storage_volume(self, storage_id): try: volume_infos = self.ssh_pool.do_exec('show volumes') volume_detail = self.handle_xml_to_json(volume_infos, 'volumes') pools_info = self.ssh_pool.do_exec('show pools') pool_detail = self.handle_xml_to_json(pools_info, 'pools') list_volumes = [] for data in volume_detail: health = data.get('health') status = constants.StoragePoolStatus.OFFLINE if health == 'OK': status = constants.StoragePoolStatus.NORMAL total_size = self.parse_string_to_bytes(data.get('total-size')) total_avail = self.parse_string_to_bytes( data.get('allocated-size')) native_storage_pool_id = '' if pool_detail: native_storage_pool_id = pool_detail[0]. \ get('serial-number') for pools in pool_detail: if data.get('virtual-disk-name') == pools.\ get('name'): native_storage_pool_id = pools.\ get('serial-number') blocks = data.get('blocks') blocks = int(blocks) if blocks is not None else 0 volume_map = { 'name': data.get('volume-name'), 'storage_id': storage_id, 'description': data.get('volume-name'), 'status': status, 'native_volume_id': str(data.get('durable-id')), 'native_storage_pool_id': native_storage_pool_id, 'wwn': str(data.get('wwn')), 'type': data.get('volume-type'), 'total_capacity': int(total_size), 'free_capacity': int(total_size - total_avail), 'used_capacity': int(total_avail), 'blocks': blocks, 'compressed': True, 'deduplicated': True } list_volumes.append(volume_map) return list_volumes except Exception as e: err_msg = "Failed to get storage volume: %s" % (six.text_type(e)) LOG.error(err_msg) raise e def list_storage_pools(self, storage_id): try: pool_infos = self.ssh_pool.do_exec('show pools') pool_detail = self.handle_xml_to_json(pool_infos, 'pools') volume_list = self.list_storage_volume(storage_id) pools_list = [] for data in pool_detail: volume_size = 0 blocks = 0 if volume_list: for volume in volume_list: if volume.get('native_storage_pool_id') == data.\ get('serial-number'): volume_size += volume.get('total_capacity') blocks += volume.get('blocks') health = data.get('health') status = constants.StoragePoolStatus.OFFLINE if health == 'OK': status = constants.StoragePoolStatus.NORMAL total_size = self.parse_string_to_bytes( data.get('total-size')) pool_map = { 'name': data.get('name'), 'storage_id': storage_id, 'native_storage_pool_id': data.get('serial-number'), 'status': status, 'storage_type': constants.StorageType.BLOCK, 'total_capacity': int(total_size), 'subscribed_capacity': int(blocks), 'used_capacity': volume_size, 'free_capacity': int(total_size - volume_size) } pools_list.append(pool_map) return pools_list except Exception as e: err_msg = "Failed to get storage pool: %s" % (six.text_type(e)) LOG.error(err_msg) raise e @staticmethod def parse_string_to_bytes(value): capacity = 0 if value: if value.isdigit(): capacity = float(value) else: if value == '0B': capacity = 0 else: unit = value[-2:] capacity = float(value[:-2]) * int( Tools.change_capacity_to_bytes(unit)) return capacity @staticmethod def handle_xml_to_json(detail_info, element): detail_arr = [] detail_data = detail_info.split('\n') detail = detail_data[1:len(detail_data) - 1] detail_xml = ''.join(detail) xml_element = Et.fromstring(detail_xml) for children in xml_element.iter('OBJECT'): property_name = children.get('basetype') if element == property_name: msg = {} for child in children.iter('PROPERTY'): msg[child.get('name')] = child.text
detail_arr.append(msg) return detail_arr def list_alerts(self, query_para): alert_list = [] try: alert_infos = self.ssh_pool.do_exec('show events error') alert_json = self.handle_xml_to_json(alert_infos, 'events') for alert_map in alert_json: now = time.time() occur_time = int(round(now * consts.SecondsNumber .SECONDS_TO_MS)) time_stamp = alert_map.get('time-stamp-numeric') if time_stamp is not None: occur_time = int(time_stamp) * consts.SecondsNumber\ .SECONDS_TO_MS if not alert_util.is_alert_in_time_range(query_para, occur_time): continue event_code = alert_map.get('event-code') event_id = alert_map.get('event-id') location = alert_map.get('message') resource_type = alert_map.get('event-code') severity = alert_map.get('severity') additional_info = str(alert_map.get('additional-information')) match_key = '' if event_code: match_key += event_code if severity: match_key += severity if location: match_key += location description = None if additional_info: description = additional_info if severity == 'Informational' or severity == 'RESOLVED': continue alert_model = { 'alert_id': event_id, 'alert_name': event_code, 'severity': severity, 'category': constants.Category.FAULT, 'type': 'EquipmentAlarm', 'sequence_number': event_id, 'occur_time': occur_time, 'description': description, 'resource_type': resource_type, 'location': location, 'match_key': hashlib.md5(match_key.encode()).hexdigest() } alert_list.append(alert_model) alert_list_data = SSHHandler.get_last_alert_data(alert_list) return alert_list_data except Exception as e: err_msg = "Failed to get storage alert: %s" % (six.text_type(e)) LOG.error(err_msg) raise e @staticmethod def get_last_alert_data(alert_json): alert_list = [] alert_json.sort(key=itemgetter('alert_name', 'location', 'severity')) for key, item in groupby(alert_json, key=itemgetter( 'alert_name', 'location', 'severity')): alert_last_index = 0 alert_list.append(list(item)[alert_last_index]) return alert_list @staticmethod def parse_alert(alert): try: alert_model = dict() alert_id = None description = None severity = consts.TrapSeverity.TRAP_SEVERITY_MAP.get('8') sequence_number = None event_type = None for alert_key, alert_value in alert.items(): if consts.AlertOIDNumber.OID_ERR_ID in alert_key: alert_id = str(alert_value) elif consts.AlertOIDNumber.OID_EVENT_TYPE in alert_key: event_type = alert_value elif consts.AlertOIDNumber.OID_EVENT_DESC in alert_key: description = alert_value elif consts.AlertOIDNumber.OID_SEVERITY in alert_key: severity = consts.TrapSeverity.TRAP_SEVERITY_MAP\ .get(alert.get(consts.AlertOIDNumber.OID_SEVERITY), constants.Severity.INFORMATIONAL) elif consts.AlertOIDNumber.OID_EVENT_ID in alert_key: sequence_number = alert_value if description: desc_arr = description.split(",") if desc_arr: alert_id = SSHHandler.split_by_char_and_number( desc_arr[0], ":", 1) alert_model['alert_id'] = str(alert_id) alert_model['alert_name'] = event_type alert_model['severity'] = severity alert_model['category'] = constants.Category.FAULT alert_model['type'] = constants.EventType.EQUIPMENT_ALARM alert_model['sequence_number'] = sequence_number now = time.time() alert_model['occur_time'] = int(round(now * consts. SecondsNumber.SECONDS_TO_MS)) alert_model['description'] = description alert_model['location'] = description return alert_model except Exception as e: LOG.error(e) msg = "Failed to build alert model: %s."
% (six.text_type(e)) raise exception.InvalidResults(msg) @staticmethod def split_by_char_and_number(split_str, split_char, arr_number): split_value = '' if split_str: tmp_value = split_str.split(split_char, 1) if arr_number == 1 and len(tmp_value) > 1: split_value = tmp_value[arr_number].strip() elif arr_number == 0: split_value = tmp_value[arr_number].strip() return split_value @staticmethod def handle_xml_to_dict(xml_info, element): msg = {} xml_split = xml_info.split('\n') xml_data = xml_split[1:len(xml_split) - 1] detail_xml = ''.join(xml_data) xml_element = Et.fromstring(detail_xml) for children in xml_element.iter('OBJECT'): property_name = children.get('basetype') if element == property_name: for child in children.iter('PROPERTY'): msg[child.get('name')] = child.text return msg def list_storage_host_initiators(self, storage_id): try: initiator_list = [] host_groups_info = self.ssh_pool.do_exec("show initiators") host_groups_json = self.handle_xml_to_json(host_groups_info, "initiator") type_switch = { consts.InitiatorType.ISCSI_INITIATOR_TYPE: consts.InitiatorType.ISCSI_INITIATOR_DESCRIPTION, consts.InitiatorType.FC_INITIATOR_TYPE: consts.InitiatorType.FC_INITIATOR_DESCRIPTION, } for initiator in host_groups_json: description = type_switch.get( initiator.get('host-bus-type-numeric'), consts.InitiatorType.UNKNOWN_INITIATOR_DESCRIPTION) initiator_item = { "name": initiator.get('nickname'), "type": description, "alias": initiator.get('durable-id'), "storage_id": storage_id, "native_storage_host_initiator_id": initiator.get('durable-id'), "wwn": initiator.get('id'), "status": constants.InitiatorStatus.ONLINE, "native_storage_host_id": initiator.get('host-id') } initiator_list.append(initiator_item) return initiator_list except Exception as e: LOG.error("Failed to get initiator " "from msa storage_id: %s" % storage_id) raise e def list_storage_hosts(self, storage_id): try: hosts_info = self.ssh_pool.do_exec('show host-groups') host_list = [] hosts = self.handle_xml_to_json(hosts_info, 'host') host_set = set() for host in hosts: status = constants.HostStatus.NORMAL os_type = constants.HostOSTypes.HP_UX host_member_count = int(host.get('member-count')) if host_member_count > 0: serial_number = host.get('serial-number') if serial_number not in host_set: host_set.add(host.get('serial-number')) host_dict = { "name": host.get('name'), "description": host.get('durable-id'), "storage_id": storage_id, "native_storage_host_id": host.get('serial-number'), "os_type": os_type, "status": status } host_list.append(host_dict) return host_list except Exception as e: LOG.error("Failed to get host " "from msa storage_id: %s" % storage_id) raise e def list_storage_host_groups(self, storage_id): try: host_groups_info = self.ssh_pool.do_exec('show host-groups') host_group_list = [] storage_host_grp_relation_list = [] host_groups = self.handle_xml_to_json( host_groups_info, 'host-group') host_info_list = self.handle_xml_to_json(host_groups_info, 'host') for host_group in host_groups: member_count = int(host_group.get('member-count')) if member_count > 0: hosts_list = [] storage_host_group_id = host_group.get('serial-number') for host_info in host_info_list: host_id = host_info.get('serial-number') host_group_id = host_info.get('host-group') if host_id != 'NOHOST' and \ host_group_id == storage_host_group_id: hosts_list.append(host_id) storage_host_group_relation = { 'storage_id': storage_id, 'native_storage_host_group_id': storage_host_group_id, 'native_storage_host_id': host_id } 
storage_host_grp_relation_list.\ append(storage_host_group_relation) host_group_map = { "name": host_group.get('name'), "description": host_group.get('durable-id'), "storage_id": storage_id, "native_storage_host_group_id": storage_host_group_id, "storage_hosts": ','.join(hosts_list) } host_group_list.append(host_group_map) storage_host_groups_result = { 'storage_host_groups': host_group_list, 'storage_host_grp_host_rels': storage_host_grp_relation_list } return storage_host_groups_result except Exception as e: LOG.error("Failed to get host_group from msa " "storage_id: %s" % storage_id) raise e def list_volume_groups(self, storage_id): try: volume_group_list = [] volume_group_relation_list = [] volume_groups_info = self.ssh_pool.do_exec('show volume-groups') volume_groups_json = self.handle_xml_to_json( volume_groups_info, 'volume-groups') volumes_json = self.handle_xml_to_json( volume_groups_info, 'volumes') for volume_group in volume_groups_json: volumes_list = [] durable_id = volume_group.get('durable-id') if volumes_json: for volume_info in volumes_json: group_key = volume_info.get('group-key') volume_id = volume_info.get('durable-id') if group_key == durable_id: volumes_list.append(volume_id) volume_group_relation = { 'storage_id': storage_id, 'native_volume_group_id': durable_id, 'native_volume_id': volume_id } volume_group_relation_list.\ append(volume_group_relation) volume_groups_map = { "name": volume_group.get('group-name'), "description": volume_group.get('durable-id'), "storage_id": storage_id, "native_volume_group_id": durable_id, "volumes": ','.join(volumes_list) } volume_group_list.append(volume_groups_map) volume_group_result = { 'volume_groups': volume_group_list, 'vol_grp_vol_rels': volume_group_relation_list } return volume_group_result except Exception as e: LOG.error("Failed to get volume_group" " from msa storage_id: %s" % storage_id) raise e def list_port_groups(self, storage_id): try: port_group_list = [] port_group_relation_list = [] storage_view_info = self.ssh_pool.do_exec('show maps all ') storage_port_list = self.list_storage_ports(storage_id) storage_host_view = self.handle_xml_to_json( storage_view_info, 'volume-view-mappings') reduce_set = set() for storage_view in storage_host_view: port_number = storage_view.get('ports') port_group_dict = self.get_port_group_id_and_name( port_number, storage_port_list) native_port_group_id = port_group_dict.get( 'native_port_group_id') native_port_group_name = port_group_dict.get( 'native_port_group_name') if native_port_group_name: native_port_group_id = "port_group_" + \ native_port_group_id if native_port_group_id in reduce_set: continue reduce_set.add(native_port_group_id) port_group_map = { 'name': native_port_group_id, 'description': native_port_group_id, 'storage_id': storage_id, 'native_port_group_id': native_port_group_id, 'ports': native_port_group_name } port_ids = native_port_group_name.split(',') for port_id in port_ids: port_group_relation = { 'storage_id': storage_id, 'native_port_group_id': native_port_group_id, 'native_port_id': port_id } port_group_relation_list.append( port_group_relation) port_group_list.append(port_group_map) result = { 'port_groups': port_group_list, 'port_grp_port_rels': port_group_relation_list } return result except Exception as e: LOG.error("Failed to get port_group" " from msa storage_id: %s" % storage_id) raise e @staticmethod def get_port_group_id_and_name(port_number, storage_port_list): native_port_group_id = [] native_port_group_name = [] if port_number: port_codes = 
port_number.split(',') for port_code in port_codes: for port in storage_port_list: port_name = port.get('name') durable_id = port.get('native_port_id') if port_code in port_name: native_port_group_id.append(port_name) native_port_group_name.append(durable_id) port_group_dict = { 'native_port_group_id': ''.join(native_port_group_id), 'native_port_group_name': ','.join(native_port_group_name) } return port_group_dict def list_masking_views(self, storage_id): try: views_list = [] storage_view_info = self.ssh_pool.do_exec('show maps all ') if storage_view_info: storage_port_list = self.list_storage_ports(storage_id) host_list = self.list_storage_hosts(storage_id) initiators_list = self.list_storage_host_initiators(storage_id) host_group_list = self.list_storage_host_groups(storage_id) storage_host_group = host_group_list.get('storage_host_groups') storage_host_view = self.handle_xml_to_json( storage_view_info, 'volume-view-mappings') views_list.extend( self.get_storage_view_list(storage_host_view, 'volume', storage_id, storage_port_list, host_list, initiators_list, storage_host_group)) storage_host_volume_groups_view = self.handle_xml_to_json( storage_view_info, 'volume-group-view-mappings') views_list.extend(self.get_storage_view_list( storage_host_volume_groups_view, 'group', storage_id, storage_port_list, host_list, initiators_list, storage_host_group)) return views_list except Exception as e: LOG.error("Failed to get view " "from msa storage_id: %s" % storage_id) raise e def get_storage_view_list(self, storage_view_list, vol_type, storage_id, storage_port_list, host_list, initiators_list, storage_host_groups): views_list = [] if storage_view_list: native_volume_group_name = 'native_volume_group_id'\ if vol_type == 'group' else 'native_volume_id' for host_view in storage_view_list: access = host_view.get('access') if access != 'not-mapped': mapped_id = host_view.get('mapped-id') native_masking_view_id = host_view.get('durable-id') volume_id = host_view.get('parent-id') port_number = host_view.get('ports') view_name = host_view.get('nickname') host_group_name = 'native_storage_host_group_id'\ if '.*.*' in view_name else 'native_storage_host_id' native_port_group_dict = \ self.get_port_group_id_and_name(port_number, storage_port_list) native_port_group_id = native_port_group_dict.get( 'native_port_group_id') native_storage_host_id = self.get_storage_host_id( host_list, mapped_id, initiators_list, storage_host_groups, view_name) view_map = { "name": view_name, "description": view_name, "storage_id": storage_id, "native_masking_view_id": native_masking_view_id + volume_id, native_volume_group_name: volume_id, host_group_name: native_storage_host_id } if native_port_group_id: view_map['native_port_group_id'] = \ "port_group_" + native_port_group_id views_list.append(view_map) return views_list @staticmethod def get_storage_host_id(host_list, mapped_id, initiators_list, storage_host_groups, view_name): for host_value in host_list: host_durable_id = host_value.get('description') if host_durable_id == mapped_id: native_storage_host_id = \ host_value.get('native_storage_host_id') return native_storage_host_id for initiators in initiators_list: initiators_durable_id = initiators.get( 'native_storage_host_initiator_id') if initiators_durable_id == mapped_id: native_storage_host_id = \ initiators.get('native_storage_host_id') return native_storage_host_id group_name = view_name.split('.')[0] for host_group in storage_host_groups: if group_name == host_group.get('name'): native_storage_host_id = \ 
host_group.get('native_storage_host_group_id') return native_storage_host_id ================================================ FILE: delfin/drivers/huawei/__init__.py ================================================ ================================================ FILE: delfin/drivers/huawei/oceanstor/__init__.py ================================================ ================================================ FILE: delfin/drivers/huawei/oceanstor/alert_handler.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import datetime from oslo_log import log from delfin import exception from delfin.common import alert_util from delfin.common import constants from delfin.drivers.huawei.oceanstor import oid_mapper from delfin.i18n import _ LOG = log.getLogger(__name__) class AlertHandler(object): """Alert handling functions for the Huawei OceanStor driver""" TIME_PATTERN = "%Y-%m-%d,%H:%M:%S.%f" # Translation of trap severity to alert model severity SEVERITY_MAP = {"1": constants.Severity.CRITICAL, "2": constants.Severity.MAJOR, "3": constants.Severity.MINOR, "4": constants.Severity.WARNING} # Translation of trap alert category to alert model category CATEGORY_MAP = {"1": constants.Category.FAULT, "2": constants.Category.RECOVERY, "3": constants.Category.EVENT} # Translation of trap fault type to alert type TYPE_MAP = { "1": constants.EventType.COMMUNICATIONS_ALARM, "2": constants.EventType.EQUIPMENT_ALARM, "3": constants.EventType.PROCESSING_ERROR_ALARM, "4": constants.EventType.QUALITY_OF_SERVICE_ALARM, "5": constants.EventType.ENVIRONMENTAL_ALARM, "6": constants.EventType.QUALITY_OF_SERVICE_ALARM} # Translation of severity of queried alerts to alert model severity QUERY_ALERTS_SEVERITY_MAP = {2: constants.Severity.INFORMATIONAL, 3: constants.Severity.WARNING, 5: constants.Severity.MAJOR, 6: constants.Severity.CRITICAL} # Translation of alert category of queried alerts to alert model category QUERY_ALERTS_CATEGORY_MAP = {0: constants.Category.EVENT, 1: constants.Category.FAULT, 2: constants.Category.RECOVERY} # Attributes expected in alert info to proceed with model filling _mandatory_alert_attributes = ('hwIsmReportingAlarmAlarmID', 'hwIsmReportingAlarmFaultTitle', 'hwIsmReportingAlarmFaultLevel', 'hwIsmReportingAlarmNodeCode', 'hwIsmReportingAlarmFaultType', 'hwIsmReportingAlarmAdditionInfo', 'hwIsmReportingAlarmSerialNo', 'hwIsmReportingAlarmFaultCategory', 'hwIsmReportingAlarmRestoreAdvice', 'hwIsmReportingAlarmFaultTime' ) @staticmethod def parse_alert(context, alert): """Parse alert data and fill the alert model.""" # Check for mandatory alert attributes alert = oid_mapper.OidMapper.map_oids(alert) LOG.info("Get alert from storage: %s", alert) for attr in AlertHandler._mandatory_alert_attributes: if not alert.get(attr): msg = "Mandatory information %s missing in alert message. 
" \ % attr raise exception.InvalidInput(msg) try: alert_model = dict() # This information is sourced from device registration info alert_model['alert_id'] = alert['hwIsmReportingAlarmAlarmID'] alert_model['alert_name'] = alert['hwIsmReportingAlarmFaultTitle'] alert_model['severity'] = AlertHandler.SEVERITY_MAP.get( alert['hwIsmReportingAlarmFaultLevel'], constants.Severity.NOT_SPECIFIED) alert_model['category'] = AlertHandler.CATEGORY_MAP.get( alert['hwIsmReportingAlarmFaultCategory'], constants.Category.NOT_SPECIFIED) alert_model['type'] = AlertHandler.TYPE_MAP.get( alert['hwIsmReportingAlarmFaultType'], constants.EventType.NOT_SPECIFIED) alert_model['sequence_number'] \ = alert['hwIsmReportingAlarmSerialNo'] occur_time = datetime.strptime( alert['hwIsmReportingAlarmFaultTime'], AlertHandler.TIME_PATTERN) alert_model['occur_time'] = int(occur_time.timestamp() * 1000) description = alert['hwIsmReportingAlarmAdditionInfo'] if AlertHandler._is_hex(description): description = bytes.fromhex(description[2:]).decode('ascii') alert_model['description'] = description recovery_advice = alert['hwIsmReportingAlarmRestoreAdvice'] if AlertHandler._is_hex(recovery_advice): recovery_advice = bytes.fromhex( recovery_advice[2:]).decode('ascii') alert_model['recovery_advice'] = recovery_advice alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE alert_model['location'] = 'Node code=' \ + alert['hwIsmReportingAlarmNodeCode'] if alert.get('hwIsmReportingAlarmLocationInfo'): alert_model['location'] \ = alert_model['location'] + ',' + alert[ 'hwIsmReportingAlarmLocationInfo'] return alert_model except Exception as e: LOG.error(e) msg = (_("Failed to build alert model as some attributes missing " "in alert message.")) raise exception.InvalidResults(msg) def parse_queried_alerts(self, alert_list, query_para): """Parse queried alert data and fill the alert model.""" # List contains all the current alarms of given storage id alert_model_list = [] for alert in alert_list: try: occur_time = alert['startTime'] # skip if alert not in input time range if not alert_util.is_alert_in_time_range(query_para, occur_time): continue alert_model = dict() alert_model['alert_id'] = alert['eventID'] alert_model['alert_name'] = alert['name'] alert_model['severity'] = self.QUERY_ALERTS_SEVERITY_MAP.get( alert['level'], constants.Severity.NOT_SPECIFIED) alert_model['category'] = self.QUERY_ALERTS_CATEGORY_MAP.get( alert['eventType'], constants.Category.NOT_SPECIFIED) alert_model['type'] = constants.EventType.NOT_SPECIFIED alert_model['sequence_number'] = alert['sequence'] alert_model['occur_time'] = int(occur_time * 1000) alert_model['description'] = alert['description'] alert_model['recovery_advice'] = alert['suggestion'] alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE alert_model['location'] = alert['location'] alert_model_list.append(alert_model) except Exception as e: LOG.error(e) msg = (_("Failed to build alert model as some attributes" " missing in queried alerts.")) raise exception.InvalidResults(msg) return alert_model_list def add_trap_config(self, context, storage_id, trap_config): """Config the trap receiver in storage system.""" # Currently not implemented pass def remove_trap_config(self, context, storage_id, trap_config): """Remove trap receiver configuration from storage system.""" # Currently not implemented pass def clear_alert(self, context, storage_id, alert): """Clear alert from storage system.""" # Currently not implemented pass @staticmethod def _is_hex(value): try: int(value, 16) except
ValueError: return False return True ================================================ FILE: delfin/drivers/huawei/oceanstor/consts.py ================================================ # Copyright 2020 The SODA Authors. # Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from delfin.common import constants STATUS_HEALTH = '1' STATUS_ACTIVE = '43' STATUS_RUNNING = '10' STATUS_VOLUME_READY = '27' STATUS_LUNCOPY_READY = '40' STATUS_QOS_ACTIVE = '2' QOS_INACTIVATED = '45' LUN_TYPE = '11' SNAPSHOT_TYPE = '27' STATUS_POOL_ONLINE = '27' STATUS_STORAGE_NORMAL = '1' STATUS_CTRLR_OFFLINE = '28' STATUS_CTRLR_UNKNOWN = '0' PORT_TYPE_FC = '212' PORT_TYPE_ETH = '213' PORT_TYPE_SAS = '214' PORT_TYPE_FCOE = '252' PORT_TYPE_PCIE = '233' PORT_TYPE_BOND = '235' PORT_LOGICTYPE_HOST = '0' PORT_HEALTH_UNKNOWN = '0' PORT_HEALTH_NORMAL = '1' PORT_HEALTH_FAULTY = '2' PORT_HEALTH_ABOUTFAIL = '3' PORT_HEALTH_PARTIALLYDAMAGED = '4' PORT_HEALTH_INCONSISTENT = '9' PORT_RUNNINGSTS_UNKNOWN = '0' PORT_RUNNINGSTS_NORMAL = '1' PORT_RUNNINGSTS_RUNNING = '2' PORT_RUNNINGSTS_LINKUP = '10' PORT_RUNNINGSTS_LINKDOWN = '11' PORT_RUNNINGSTS_TOBERECOVERED = '33' PORT_LOGICTYPE_EXPANSION = '1' PORT_LOGICTYPE_MANAGEMENT = '2' PORT_LOGICTYPE_INTERNAL = '3' PORT_LOGICTYPE_MAINTENANCE = '4' PORT_LOGICTYPE_SERVICE = '5' PORT_LOGICTYPE_MAINTENANCE2 = '6' PORT_LOGICTYPE_INTERCONNECT = '11' PortTypeMap = { PORT_TYPE_FC: constants.PortType.FC, PORT_TYPE_FCOE: constants.PortType.FCOE, PORT_TYPE_ETH: constants.PortType.ETH, PORT_TYPE_PCIE: constants.PortType.OTHER, PORT_TYPE_SAS: constants.PortType.SAS, PORT_TYPE_BOND: constants.PortType.OTHER, } PortLogicTypeMap = { PORT_LOGICTYPE_HOST: constants.PortLogicalType.SERVICE, PORT_LOGICTYPE_EXPANSION: constants.PortLogicalType.OTHER, PORT_LOGICTYPE_MANAGEMENT: constants.PortLogicalType.MANAGEMENT, PORT_LOGICTYPE_INTERNAL: constants.PortLogicalType.INTERNAL, PORT_LOGICTYPE_MAINTENANCE: constants.PortLogicalType.MAINTENANCE, PORT_LOGICTYPE_SERVICE: constants.PortLogicalType.SERVICE, PORT_LOGICTYPE_MAINTENANCE2: constants.PortLogicalType.MAINTENANCE, PORT_LOGICTYPE_INTERCONNECT: constants.PortLogicalType.INTERCONNECT, } DISK_STATUS_UNKNOWN = '0' DISK_STATUS_NORMAL = '1' DISK_STATUS_OFFLINE = '28' DISK_TYPE_SAS = '1' DISK_TYPE_SATA = '2' DISK_TYPE_SSD = '3' DISK_LOGICTYPE_FREE = '1' DISK_LOGICTYPE_MEMBER = '2' DISK_LOGICTYPE_HOTSPARE = '3' DISK_LOGICTYPE_CACHE = '4' DiskPhysicalTypeMap = { DISK_TYPE_SATA: constants.DiskPhysicalType.SATA, DISK_TYPE_SAS: constants.DiskPhysicalType.SAS, DISK_TYPE_SSD: constants.DiskPhysicalType.SSD, } DiskLogicalTypeMap = { DISK_LOGICTYPE_FREE: constants.DiskLogicalType.FREE, DISK_LOGICTYPE_MEMBER: constants.DiskLogicalType.MEMBER, DISK_LOGICTYPE_HOTSPARE: constants.DiskLogicalType.HOTSPARE, DISK_LOGICTYPE_CACHE: constants.DiskLogicalType.CACHE, } FS_WORM_COMPLIANCE = '1' FS_WORM_AUDIT_LOG = '2' FS_WORM_ENTERPRISE = '3' FS_HEALTH_NORMAL = '1' FS_TYPE_THICK = '0' FS_TYPE_THIN = '1' PARENT_TYPE_POOL = 216 QUOTA_NOT_ENABLED 
= 'INVALID_VALUE64' QUOTA_TYPE_TREE = '1' QUOTA_TYPE_USER = '2' QUOTA_TYPE_GROUP = '3' SECURITY_STYLE_MIXED = '0' SECURITY_STYLE_NATIVE = '1' SECURITY_STYLE_NTFS = '2' SECURITY_STYLE_UNIX = '3' PARENT_OBJECT_TYPE_FS = 40 SHARE_NFS = '16401' ERROR_CONNECT_TO_SERVER = -403 ERROR_UNAUTHORIZED_TO_SERVER = -401 SOCKET_TIMEOUT = 52 LOGIN_SOCKET_TIMEOUT = 4 ERROR_VOLUME_NOT_EXIST = 1077939726 RELOGIN_ERROR_PASS = [ERROR_VOLUME_NOT_EXIST] PWD_EXPIRED = 3 PWD_RESET = 4 BLOCK_STORAGE_POOL_TYPE = '1' FILE_SYSTEM_POOL_TYPE = '2' SECTORS_SIZE = 512 QUERY_PAGE_SIZE = 100 THICK_LUNTYPE = '0' THIN_LUNTYPE = '1' HOST_OS = [ constants.HostOSTypes.LINUX, constants.HostOSTypes.WINDOWS, constants.HostOSTypes.SOLARIS, constants.HostOSTypes.HP_UX, constants.HostOSTypes.AIX, constants.HostOSTypes.XEN_SERVER, constants.HostOSTypes.VMWARE_ESX, constants.HostOSTypes.LINUX_VIS, constants.HostOSTypes.WINDOWS_SERVER_2012, constants.HostOSTypes.ORACLE_VM, constants.HostOSTypes.OPEN_VMS, ] HOST_RUNNINGSTATUS_NORMAL = '1' INITIATOR_RUNNINGSTATUS_UNKNOWN = '0' INITIATOR_RUNNINGSTATUS_ONLINE = '27' INITIATOR_RUNNINGSTATUS_OFFLINE = '28' ISCSI_INITIATOR_TYPE = 222 FC_INITIATOR_TYPE = 223 IB_INITIATOR_TYPE = 16499 ISCSI_INITIATOR_DESCRIPTION = 'iSCSI Initiator' FC_INITIATOR_DESCRIPTION = 'FC Initiator' IB_INITIATOR_DESCRIPTION = 'IB Initiator' UNKNOWN_INITIATOR_DESCRIPTION = 'Unknown Initiator' OCEANSTOR_METRICS = { 'iops': '22', 'readIops': '25', 'writeIops': '28', 'throughput': '21', 'readThroughput': '23', 'writeThroughput': '26', 'responseTime': '370', 'ioSize': '228', 'readIoSize': '24', 'writeIoSize': '27', 'cacheHitRatio': '303', 'readCacheHitRatio': '93', 'writeCacheHitRatio': '95', } CONVERT_TO_MILLI_SECOND_LIST = [ 'responseTime' ] IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Input/output operations per second" } READ_IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Read input/output operations per second" } WRITE_IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Write input/output operations per second" } THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data is " "successfully transferred in MB/s" } READ_THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data read is " "successfully transferred in MB/s" } WRITE_THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data write is " "successfully transferred in MB/s" } RESPONSE_TIME_DESCRIPTION = { "unit": "ms", "description": "Average time taken for an IO " "operation in ms" } CACHE_HIT_RATIO_DESCRIPTION = { "unit": "%", "description": "Percentage of IOs that are cache hits" } READ_CACHE_HIT_RATIO_DESCRIPTION = { "unit": "%", "description": "Percentage of read ops that are cache hits" } WRITE_CACHE_HIT_RATIO_DESCRIPTION = { "unit": "%", "description": "Percentage of write ops that are cache hits" } IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of IO requests in KB" } READ_IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of read IO requests in KB" } WRITE_IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of write IO requests in KB" } CPU_USAGE_DESCRIPTION = { "unit": "%", "description": "Percentage of CPU usage" } MEMORY_USAGE_DESCRIPTION = { "unit": "%", "description": "Percentage of memory usage" } SERVICE_TIME = { "unit": 'ms', "description": "Service time of the resource in ms" } POOL_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops":
WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, } VOLUME_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, "cacheHitRatio": CACHE_HIT_RATIO_DESCRIPTION, "readCacheHitRatio": READ_CACHE_HIT_RATIO_DESCRIPTION, "writeCacheHitRatio": WRITE_CACHE_HIT_RATIO_DESCRIPTION, "ioSize": IO_SIZE_DESCRIPTION, "readIoSize": READ_IO_SIZE_DESCRIPTION, "writeIoSize": WRITE_IO_SIZE_DESCRIPTION, } CONTROLLER_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, } PORT_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, } DISK_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, } ================================================ FILE: delfin/drivers/huawei/oceanstor/oceanstor.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
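# Editor's illustrative sketch (not part of this driver file): the driver
# below converts the sector-count fields returned by the REST API into bytes
# by multiplying with the array's sector size, as in get_storage() and
# list_storage_pools(). The dict keys mirror the payload fields used below;
# the numeric values are made-up examples.
def _sectors_to_bytes_sketch():
    sector_size = 512  # default, matching consts.SECTORS_SIZE
    storage = {'TOTALCAPACITY': '209715200', 'USEDCAPACITY': '104857600'}
    total_cap = int(storage['TOTALCAPACITY']) * sector_size  # 100 GiB in bytes
    used_cap = int(storage['USEDCAPACITY']) * sector_size  # 50 GiB in bytes
    return total_cap, used_cap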
from oslo_config import cfg from oslo_log import log from delfin.common import constants from delfin.drivers.huawei.oceanstor import rest_client, consts, alert_handler from delfin.drivers import driver LOG = log.getLogger(__name__) CONF = cfg.CONF oceanstor_opts = [ cfg.BoolOpt( 'enable_perf_config', default=False, help='Enable changing performance configs on the storage array. ' 'Settings for real-time and historical collection are updated.'), ] CONF.register_opts(oceanstor_opts, "oceanstor_driver") class OceanStorDriver(driver.StorageDriver): """OceanStorDriver implements the Huawei OceanStor driver.""" def __init__(self, **kwargs): super().__init__(**kwargs) self.client = rest_client.RestClient(**kwargs) self.sector_size = consts.SECTORS_SIZE self.init_perf_config = CONF.oceanstor_driver.enable_perf_config def reset_connection(self, context, **kwargs): self.client.reset_connection(**kwargs) def get_storage(self, context): storage = self.client.get_storage() # Get firmware version controller = self.client.get_all_controllers() firmware_ver = controller[0]['SOFTVER'] # Get status status = constants.StorageStatus.OFFLINE if storage['RUNNINGSTATUS'] == consts.STATUS_STORAGE_NORMAL: status = constants.StorageStatus.NORMAL # Keep sector_size for use in list pools self.sector_size = int(storage['SECTORSIZE']) total_cap = int(storage['TOTALCAPACITY']) * self.sector_size used_cap = int(storage['USEDCAPACITY']) * self.sector_size free_cap = int(storage['userFreeCapacity']) * self.sector_size raw_cap = int(storage['MEMBERDISKSCAPACITY']) * self.sector_size s = { 'name': 'OceanStor', 'vendor': 'Huawei', 'description': 'Huawei OceanStor Storage', 'model': storage['NAME'], 'status': status, 'serial_number': storage['ID'], 'firmware_version': firmware_ver, 'location': storage['LOCATION'], 'total_capacity': total_cap, 'used_capacity': used_cap, 'free_capacity': free_cap, 'raw_capacity': raw_cap } LOG.info("get_storage(), successfully retrieved storage details") return s def list_storage_pools(self, context): try: # Get list of OceanStor pool details pools = self.client.get_all_pools() pool_list = [] for pool in pools: # Get pool status status = constants.StoragePoolStatus.OFFLINE if pool['RUNNINGSTATUS'] == consts.STATUS_POOL_ONLINE: status = constants.StoragePoolStatus.NORMAL # Get pool storage_type storage_type = constants.StorageType.BLOCK if pool.get('USAGETYPE') == consts.FILE_SYSTEM_POOL_TYPE: storage_type = constants.StorageType.FILE total_cap = \ int(pool['USERTOTALCAPACITY']) * self.sector_size used_cap = \ int(pool['USERCONSUMEDCAPACITY']) * self.sector_size free_cap = \ int(pool['USERFREECAPACITY']) * self.sector_size p = { 'name': pool['NAME'], 'storage_id': self.storage_id, 'native_storage_pool_id': pool['ID'], 'description': 'Huawei OceanStor Pool', 'status': status, 'storage_type': storage_type, 'total_capacity': total_cap, 'used_capacity': used_cap, 'free_capacity': free_cap, } pool_list.append(p) return pool_list except Exception: LOG.error("Failed to get pool metrics from OceanStor") raise def _get_orig_pool_id(self, pools, volume): for pool in pools: if volume['PARENTNAME'] == pool['NAME']: return pool['ID'] return '' def list_volumes(self, context): try: # Get all volumes in OceanStor volumes = self.client.get_all_volumes() pools = self.client.get_all_pools() volume_list = [] for volume in volumes: # Get pool id of volume orig_pool_id = self._get_orig_pool_id(pools, volume) compressed = False if volume['ENABLECOMPRESSION'] != 'false': compressed = True deduplicated = False if volume['ENABLEDEDUP'] !=
'false': deduplicated = True status = constants.VolumeStatus.ERROR if volume['RUNNINGSTATUS'] == consts.STATUS_VOLUME_READY: status = constants.VolumeStatus.AVAILABLE vol_type = constants.VolumeType.THICK if volume['ALLOCTYPE'] == consts.THIN_LUNTYPE: vol_type = constants.VolumeType.THIN sector_size = int(volume['SECTORSIZE']) total_cap = int(volume['CAPACITY']) * sector_size used_cap = int(volume['ALLOCCAPACITY']) * sector_size v = { 'name': volume['NAME'], 'storage_id': self.storage_id, 'description': 'Huawei OceanStor volume', 'status': status, 'native_volume_id': volume['ID'], 'native_storage_pool_id': orig_pool_id, 'wwn': volume['WWN'], 'type': vol_type, 'total_capacity': total_cap, 'used_capacity': used_cap, 'free_capacity': None, 'compressed': compressed, 'deduplicated': deduplicated, } volume_list.append(v) return volume_list except Exception: LOG.error("Failed to get list volumes from OceanStor") raise def list_controllers(self, context): try: # Get list of OceanStor controller details controllers = self.client.get_all_controllers() controller_list = [] for controller in controllers: status = constants.ControllerStatus.NORMAL if controller['RUNNINGSTATUS'] == consts.STATUS_CTRLR_UNKNOWN: status = constants.ControllerStatus.UNKNOWN if controller['RUNNINGSTATUS'] == consts.STATUS_CTRLR_OFFLINE: status = constants.ControllerStatus.OFFLINE c = { 'name': controller['NAME'], 'storage_id': self.storage_id, 'native_controller_id': controller['ID'], 'status': status, 'location': controller['LOCATION'], 'soft_version': controller['SOFTVER'], 'cpu_info': controller['CPUINFO'], 'memory_size': controller['MEMORYSIZE'], } controller_list.append(c) return controller_list except Exception: LOG.error("Failed to get controller metrics from OceanStor") raise def list_ports(self, context): try: # Get list of OceanStor port details ports = self.client.get_all_ports() port_list = [] for port in ports: health_status = constants.PortHealthStatus.ABNORMAL conn_status = constants.PortConnectionStatus.CONNECTED logical_type = consts.PortLogicTypeMap.get( port.get('LOGICTYPE'), constants.PortLogicalType.OTHER) if port['HEALTHSTATUS'] == consts.PORT_HEALTH_UNKNOWN: health_status = constants.PortHealthStatus.UNKNOWN if port['HEALTHSTATUS'] == consts.PORT_HEALTH_NORMAL: health_status = constants.PortHealthStatus.NORMAL if port['RUNNINGSTATUS'] == consts.PORT_RUNNINGSTS_UNKNOWN: conn_status = constants.PortConnectionStatus.UNKNOWN if port['RUNNINGSTATUS'] == consts.PORT_RUNNINGSTS_LINKDOWN: conn_status = constants.PortConnectionStatus.DISCONNECTED speed = port.get('RUNSPEED') # either -1 or in Mbit/s if speed == '-1': speed = None max_speed = port.get('MAXSPEED') port_type = consts.PortTypeMap.get(port['TYPE'], constants.PortType.OTHER) # FC if port['TYPE'] == consts.PORT_TYPE_FC: max_speed = port['MAXSUPPORTSPEED'] # in 1000 M bits/s # Ethernet if port['TYPE'] == consts.PORT_TYPE_ETH: max_speed = port['maxSpeed'] # in M bits/s speed = port['SPEED'] # in M bits/s # PCIE if port['TYPE'] == consts.PORT_TYPE_PCIE: speed = port['PCIESPEED'] logical_type = constants.PortLogicalType.OTHER p = { 'name': port['NAME'], 'storage_id': self.storage_id, 'native_port_id': port['ID'], 'location': port.get('LOCATION'), 'connection_status': conn_status, 'health_status': health_status, 'type': port_type, 'logical_type': logical_type, 'speed': speed, 'max_speed': max_speed, 'native_parent_id': port.get('PARENTID'), 'wwn': port.get('WWN'), 'mac_address': port.get('MACADDRESS'), 'ipv4': port.get('IPV4ADDR'), 'ipv4_mask':
port.get('IPV4MASK'), 'ipv6': port.get('IPV6ADDR'), 'ipv6_mask': port.get('IPV6MASK'), } port_list.append(p) return port_list except Exception: LOG.error("Failed to get port metrics from OceanStor") raise def list_disks(self, context): try: # Get list of OceanStor disks details disks = self.client.get_all_disks() disk_list = [] for disk in disks: status = constants.DiskStatus.NORMAL if disk['RUNNINGSTATUS'] == consts.DISK_STATUS_OFFLINE: status = constants.DiskStatus.OFFLINE if disk['RUNNINGSTATUS'] == consts.DISK_STATUS_UNKNOWN: status = constants.DiskStatus.ABNORMAL physical_type = consts.DiskPhysicalTypeMap.get( disk['DISKTYPE'], constants.DiskPhysicalType.UNKNOWN) logical_type = consts.DiskLogicalTypeMap.get( disk['LOGICTYPE'], constants.DiskLogicalType.UNKNOWN) health_score = disk['HEALTHMARK'] capacity = int(disk['SECTORS']) * int(disk['SECTORSIZE']) d = { 'name': disk['MODEL'] + ':' + disk['SERIALNUMBER'], 'storage_id': self.storage_id, 'native_disk_id': disk['ID'], 'serial_number': disk['SERIALNUMBER'], 'manufacturer': disk['MANUFACTURER'], 'model': disk['MODEL'], 'firmware': disk['FIRMWAREVER'], 'speed': int(disk['SPEEDRPM']), 'capacity': capacity, 'status': status, 'physical_type': physical_type, 'logical_type': logical_type, 'health_score': health_score, 'native_disk_group_id': None, 'location': disk['LOCATION'], } disk_list.append(d) return disk_list except Exception: LOG.error("Failed to get disk metrics from OceanStor") raise def _list_quotas(self, quotas, fs_id, qt_id): q_type = { consts.QUOTA_TYPE_TREE: constants.QuotaType.TREE, consts.QUOTA_TYPE_USER: constants.QuotaType.USER, consts.QUOTA_TYPE_GROUP: constants.QuotaType.GROUP, } q_list = [] for qt in quotas: chq, csq, fhq, fsq = None, None, None, None uc, fc = None, None if qt['SPACEHARDQUOTA'] != consts.QUOTA_NOT_ENABLED: chq = qt['SPACEHARDQUOTA'] if qt['SPACESOFTQUOTA'] != consts.QUOTA_NOT_ENABLED: csq = qt['SPACESOFTQUOTA'] if qt['FILEHARDQUOTA'] != consts.QUOTA_NOT_ENABLED: fhq = qt['FILEHARDQUOTA'] if qt['FILESOFTQUOTA'] != consts.QUOTA_NOT_ENABLED: fsq = qt['FILESOFTQUOTA'] if qt['SPACEUSED'] != consts.QUOTA_NOT_ENABLED: uc = qt['SPACEUSED'] if qt['FILEUSED'] != consts.QUOTA_NOT_ENABLED: fc = qt['FILEUSED'] q = { "native_quota_id": qt['ID'], "type": q_type.get(qt['QUOTATYPE']), "storage_id": self.storage_id, "native_filesystem_id": fs_id, "native_qtree_id": qt_id, "capacity_hard_limit": chq, "capacity_soft_limit": csq, "file_hard_limit": fhq, "file_soft_limit": fsq, "file_count": fc, "used_capacity": uc, "user_group_name": qt['USRGRPOWNERNAME'], } q_list.append(q) return q_list def list_quotas(self, context): try: # Get list of OceanStor quotas details quotas_list = [] filesystems = self.client.get_all_filesystems() for fs in filesystems: fs_id = fs["ID"] quotas = self.client.get_all_filesystem_quotas(fs_id) if quotas: qs = self._list_quotas(quotas, fs_id, None) quotas_list.extend(qs) qtrees = self.client.get_all_qtrees(filesystems) for qt in qtrees: qt_id = qt["ID"] quotas = self.client.get_all_qtree_quotas(qt_id) if quotas: qs = self._list_quotas(quotas, None, qt_id) quotas_list.extend(qs) return quotas_list except Exception: LOG.error("Failed to get quotas from OceanStor") raise def list_filesystems(self, context): try: # Get list of OceanStor filesystems details fss = self.client.get_all_filesystems() fs_list = [] worm_type = { consts.FS_WORM_COMPLIANCE: constants.WORMType.COMPLIANCE, consts.FS_WORM_AUDIT_LOG: constants.WORMType.AUDIT_LOG, consts.FS_WORM_ENTERPRISE: constants.WORMType.ENTERPRISE } for fs in 
fss: status = constants.FilesystemStatus.FAULTY if fs['HEALTHSTATUS'] == consts.FS_HEALTH_NORMAL: status = constants.FilesystemStatus.NORMAL fs_type = constants.FSType.THICK if fs['ALLOCTYPE'] == consts.FS_TYPE_THIN: fs_type = constants.FSType.THIN pool_id = None if fs['PARENTTYPE'] == consts.PARENT_TYPE_POOL: pool_id = fs['PARENTID'] sector_size = int(fs['SECTORSIZE']) total_cap = int(fs['CAPACITY']) * sector_size used_cap = int(fs['ALLOCCAPACITY']) * sector_size free_cap = int(fs['AVAILABLECAPCITY']) * sector_size compressed = False if fs['ENABLECOMPRESSION'] != 'false': compressed = True deduplicated = False if fs['ENABLEDEDUP'] != 'false': deduplicated = True f = { 'name': fs['NAME'], 'storage_id': self.storage_id, 'native_filesystem_id': fs['ID'], 'native_pool_id': pool_id, 'compressed': compressed, 'deduplicated': deduplicated, 'worm': worm_type.get(fs['WORMTYPE'], constants.WORMType.NON_WORM), 'status': status, 'type': fs_type, 'total_capacity': total_cap, 'used_capacity': used_cap, 'free_capacity': free_cap, } fs_list.append(f) return fs_list except Exception: LOG.error("Failed to get filesystems from OceanStor") raise def list_qtrees(self, context): try: # Get list of OceanStor qtrees details filesystems = self.client.get_all_filesystems() qts = self.client.get_all_qtrees(filesystems) security_mode = { consts.SECURITY_STYLE_MIXED: constants.NASSecurityMode.MIXED, consts.SECURITY_STYLE_NATIVE: constants.NASSecurityMode.NATIVE, consts.SECURITY_STYLE_NTFS: constants.NASSecurityMode.NTFS, consts.SECURITY_STYLE_UNIX: constants.NASSecurityMode.UNIX, } qt_list = [] for qt in qts: fs_id = None if qt['PARENTTYPE'] == consts.PARENT_OBJECT_TYPE_FS: fs_id = qt['PARENTID'] q = { 'name': qt['NAME'], 'storage_id': self.storage_id, 'native_qtree_id': qt['ID'], 'native_filesystem_id': fs_id, 'security_mode': security_mode.get(qt['securityStyle']), } qt_list.append(q) return qt_list except Exception: LOG.error("Failed to get qtrees from OceanStor") raise def list_shares(self, context): try: # Get list of OceanStor shares details ss = self.client.get_all_shares() s_list = [] for s in ss: protocol = None if s.get('type') == consts.SHARE_NFS: protocol = constants.ShareProtocol.NFS if s.get('subType'): protocol = constants.ShareProtocol.CIFS if s.get('ACCESSNAME'): protocol = constants.ShareProtocol.FTP s = { 'name': s['NAME'], 'storage_id': self.storage_id, 'native_share_id': s['ID'], 'native_filesystem_id': s['FSID'], 'path': s['SHAREPATH'], 'protocol': protocol } s_list.append(s) return s_list except Exception: LOG.error("Failed to get shares from OceanStor") raise def add_trap_config(self, context, trap_config): pass def remove_trap_config(self, context, trap_config): pass @staticmethod def parse_alert(context, alert): return alert_handler.AlertHandler().parse_alert(context, alert) def clear_alert(self, context, sequence_number): return self.client.clear_alert(sequence_number) def list_alerts(self, context, query_para): # First query alerts and then translate to model alert_list = self.client.list_alerts() alert_model_list = alert_handler.AlertHandler()\ .parse_queried_alerts(alert_list, query_para) return alert_model_list def collect_perf_metrics(self, context, storage_id, resource_metrics, start_time, end_time): """Collects performance metric for the given interval""" try: if self.init_perf_config: self.client.configure_metrics_collection() self.init_perf_config = False except Exception: LOG.error("Failed to configure collection in OceanStor") raise metrics = [] try: # storage-pool metrics if 
resource_metrics.get(constants.ResourceType.STORAGE_POOL): pool_metrics = self.client.get_pool_metrics( storage_id, resource_metrics.get(constants.ResourceType.STORAGE_POOL)) metrics.extend(pool_metrics) # volume metrics if resource_metrics.get(constants.ResourceType.VOLUME): volume_metrics = self.client.get_volume_metrics( storage_id, resource_metrics.get(constants.ResourceType.VOLUME)) metrics.extend(volume_metrics) # controller metrics if resource_metrics.get(constants.ResourceType.CONTROLLER): controller_metrics = self.client.get_controller_metrics( storage_id, resource_metrics.get(constants.ResourceType.CONTROLLER)) metrics.extend(controller_metrics) # port metrics if resource_metrics.get(constants.ResourceType.PORT): port_metrics = self.client.get_port_metrics( storage_id, resource_metrics.get(constants.ResourceType.PORT)) metrics.extend(port_metrics) # disk metrics if resource_metrics.get(constants.ResourceType.DISK): disk_metrics = self.client.get_disk_metrics( storage_id, resource_metrics.get(constants.ResourceType.DISK)) metrics.extend(disk_metrics) except Exception: LOG.error("Failed to collect metrics from OceanStor") raise return metrics @staticmethod def get_capabilities(context, filters=None): """Get capability of supported driver""" return { 'is_historic': False, 'resource_metrics': { constants.ResourceType.STORAGE_POOL: consts.POOL_CAP, constants.ResourceType.VOLUME: consts.VOLUME_CAP, constants.ResourceType.CONTROLLER: consts.CONTROLLER_CAP, constants.ResourceType.PORT: consts.PORT_CAP, constants.ResourceType.DISK: consts.DISK_CAP } } def list_storage_host_initiators(self, ctx): try: # Get list of OceanStor initiators details initiators = self.client.get_all_initiators() initiator_list = [] switcher = { consts.INITIATOR_RUNNINGSTATUS_ONLINE: constants.InitiatorStatus.ONLINE, consts.INITIATOR_RUNNINGSTATUS_OFFLINE: constants.InitiatorStatus.OFFLINE, consts.INITIATOR_RUNNINGSTATUS_UNKNOWN: constants.InitiatorStatus.UNKNOWN, } type_switch = { consts.ISCSI_INITIATOR_TYPE: consts.ISCSI_INITIATOR_DESCRIPTION, consts.FC_INITIATOR_TYPE: consts.FC_INITIATOR_DESCRIPTION, consts.IB_INITIATOR_TYPE: consts.IB_INITIATOR_DESCRIPTION, } for initiator in initiators: status = switcher.get(initiator['RUNNINGSTATUS'], constants.InitiatorStatus.UNKNOWN) description = type_switch.get( initiator['TYPE'], consts.UNKNOWN_INITIATOR_DESCRIPTION) initiator_item = { "name": initiator.get('NAME'), "description": description, "alias": initiator['ID'], "storage_id": self.storage_id, "native_storage_host_initiator_id": initiator['ID'], "wwn": initiator['ID'], "status": status, "native_storage_host_id": initiator.get('PARENTID'), } initiator_list.append(initiator_item) return initiator_list except Exception: LOG.error("Failed to get initiators from OceanStor") raise def list_storage_hosts(self, ctx): try: # Get list of OceanStor host details hosts = self.client.get_all_hosts() host_list = [] for host in hosts: os_type = '' host_os = int(host['OPERATIONSYSTEM']) if host_os < len(consts.HOST_OS): os_type = consts.HOST_OS[host_os] status = constants.HostStatus.NORMAL if host['RUNNINGSTATUS'] != consts.HOST_RUNNINGSTATUS_NORMAL: status = constants.HostStatus.ABNORMAL h = { "name": host['NAME'], "description": host['DESCRIPTION'], "storage_id": self.storage_id, "native_storage_host_id": host['ID'], "os_type": os_type, "status": status, "ip_address": host['IP'] } host_list.append(h) return host_list except Exception: LOG.error("Failed to get host metrics from OceanStor") raise def list_storage_host_groups(self, 
ctx): try: # Get list of OceanStor host_groups details host_groups = self.client.get_all_host_groups() host_group_list = [] for host_group in host_groups: hosts = self.client.get_all_associate_hosts( host_group['TYPE'], host_group['ID']) hosts_str = None for host in hosts: if hosts_str: hosts_str = "{0},{1}".format(hosts_str, host['ID']) else: hosts_str = "{0}".format(host['ID']) host_g = { "name": host_group['NAME'], "description": host_group['DESCRIPTION'], "storage_id": self.storage_id, "native_storage_host_group_id": host_group['ID'], "storage_hosts": hosts_str } host_group_list.append(host_g) return host_group_list except Exception: LOG.error("Failed to get host_groups from OceanStor") raise def list_port_groups(self, ctx): try: # Get list of OceanStor port_groups details port_groups = self.client.get_all_port_groups() port_group_list = [] for port_group in port_groups: ports = self.client.get_all_associate_ports( port_group['TYPE'], port_group['ID']) ports_str = None for port in ports: if ports_str: ports_str = "{0},{1}".format(ports_str, port['ID']) else: ports_str = "{0}".format(port['ID']) port_g = { "name": port_group['NAME'], "description": port_group['DESCRIPTION'], "storage_id": self.storage_id, "native_port_group_id": port_group['ID'], "ports": ports_str } port_group_list.append(port_g) return port_group_list except Exception: LOG.error("Failed to get port_groups from OceanStor") raise def list_volume_groups(self, ctx): try: # Get list of OceanStor vol_groups details vol_groups = self.client.get_all_volume_groups() vol_group_list = [] for vol_group in vol_groups: volumes = self.client.get_all_associate_volumes( vol_group['TYPE'], vol_group['ID']) volumes_str = None for volume in volumes: if volumes_str: volumes_str = "{0},{1}".format(volumes_str, volume['ID']) else: volumes_str = "{0}".format(volume['ID']) vol_g = { "name": vol_group['NAME'], "description": vol_group['DESCRIPTION'], "storage_id": self.storage_id, "native_volume_group_id": vol_group['ID'], "volumes": volumes_str } vol_group_list.append(vol_g) return vol_group_list except Exception: LOG.error("Failed to get vol_groups from OceanStor") raise def list_masking_views(self, ctx): try: # Get list of OceanStor masking view details views = self.client.get_all_mapping_views() view_dict = {} for view in views: v = { "name": view['NAME'], "description": view['DESCRIPTION'], "storage_id": self.storage_id, "native_masking_view_id": view['ID'], } view_dict[view['ID']] = v view_keys = view_dict.keys() host_groups = self.client.get_all_host_groups() for host_group in host_groups: hg_views = self.client.get_all_associate_mapping_views( host_group['TYPE'], host_group['ID']) for hg_view in hg_views: v_id = hg_view['ID'] if v_id in view_keys: view_dict[v_id]['native_storage_host_group_id'] =\ host_group['ID'] else: msg = "Missing mapping view for host group id {0}".\ format(host_group['ID']) LOG.info(msg) volume_groups = self.client.get_all_volume_groups() for volume_group in volume_groups: vg_views = self.client.get_all_associate_mapping_views( volume_group['TYPE'], volume_group['ID']) for vg_view in vg_views: v_id = vg_view['ID'] if v_id in view_keys: view_dict[v_id]['native_volume_group_id'] =\ volume_group['ID'] else: msg = "Missing mapping view for volume group id {0}".\ format(volume_group['ID']) LOG.info(msg) port_groups = self.client.get_all_port_groups() for port_group in port_groups: pg_views = self.client.get_all_associate_mapping_views( port_group['TYPE'], port_group['ID']) for pg_view in pg_views: v_id = pg_view['ID'] 
if v_id in view_keys: view_dict[v_id]['native_port_group_id'] =\ port_group['ID'] else: msg = "Missing mapping view for port group id {0}".\ format(port_group['ID']) LOG.info(msg) return list(view_dict.values()) except Exception: LOG.error("Failed to get view metrics from OceanStor") raise ================================================ FILE: delfin/drivers/huawei/oceanstor/oid_mapper.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class OidMapper(object): """Functions/attributes for oid to alert info mapper""" # Map to translate trap oid strings to oid names OID_MAP = { "1.3.6.1.4.1.2011.2.91.10.3.1.1.1": "hwIsmReportingAlarmNodeCode", "1.3.6.1.4.1.2011.2.91.10.3.1.1.2": "hwIsmReportingAlarmLocationInfo", "1.3.6.1.4.1.2011.2.91.10.3.1.1.3": "hwIsmReportingAlarmRestoreAdvice", "1.3.6.1.4.1.2011.2.91.10.3.1.1.4": "hwIsmReportingAlarmFaultTitle", "1.3.6.1.4.1.2011.2.91.10.3.1.1.5": "hwIsmReportingAlarmFaultType", "1.3.6.1.4.1.2011.2.91.10.3.1.1.6": "hwIsmReportingAlarmFaultLevel", "1.3.6.1.4.1.2011.2.91.10.3.1.1.7": "hwIsmReportingAlarmAlarmID", "1.3.6.1.4.1.2011.2.91.10.3.1.1.8": "hwIsmReportingAlarmFaultTime", "1.3.6.1.4.1.2011.2.91.10.3.1.1.9": "hwIsmReportingAlarmSerialNo", "1.3.6.1.4.1.2011.2.91.10.3.1.1.10": "hwIsmReportingAlarmAdditionInfo", "1.3.6.1.4.1.2011.2.91.10.3.1.1.11": "hwIsmReportingAlarmFaultCategory" } def __init__(self): pass @staticmethod def map_oids(alert): """Translate oids using static map.""" alert_model = dict() for attr in alert: # Remove the instance number at the end of oid before mapping oid_str = attr.rsplit('.', 1)[0] key = OidMapper.OID_MAP.get(oid_str, None) alert_model[key] = alert[attr] return alert_model ================================================ FILE: delfin/drivers/huawei/oceanstor/rest_client.py ================================================ # Copyright 2020 The SODA Authors. # Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
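# Editor's illustrative sketch (not part of this file): OidMapper.map_oids()
# above strips the trailing instance index from each trap OID with
# rsplit('.', 1) before looking it up in OID_MAP. A standalone equivalent,
# using one real OID from OID_MAP and a made-up instance suffix and value:
def _map_oids_sketch():
    oid_map = {'1.3.6.1.4.1.2011.2.91.10.3.1.1.7': 'hwIsmReportingAlarmAlarmID'}
    trap = {'1.3.6.1.4.1.2011.2.91.10.3.1.1.7.1': '0x1234'}  # '.1' = instance
    translated = {}
    for oid, value in trap.items():
        key = oid_map.get(oid.rsplit('.', 1)[0])  # drop the instance number
        translated[key] = value
    return translated  # {'hwIsmReportingAlarmAlarmID': '0x1234'}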
import json

import requests
import six
import urllib3
from urllib3.exceptions import InsecureRequestWarning

from oslo_log import log as logging

from delfin.common import constants
from delfin import cryptor
from delfin import exception
from delfin.drivers.huawei.oceanstor import consts
from delfin.ssl_utils import HostNameIgnoreAdapter
from delfin.i18n import _

LOG = logging.getLogger(__name__)


def _get_timestamp_values(metric, value):
    timestamp = int(metric['CMO_STATISTIC_TIMESTAMP']) * 1000
    return {timestamp: value}


def _get_selection(selection):
    selected_metrics = []
    ids = ''
    for key, value in consts.OCEANSTOR_METRICS.items():
        if selection.get(key):
            selected_metrics.append(key)
            if ids:
                ids = ids + ',' + value
            else:
                ids = value
    return selected_metrics, ids


class RestClient(object):
    """Common class for Huawei OceanStor storage system."""

    def __init__(self, **kwargs):
        rest_access = kwargs.get('rest')
        if rest_access is None:
            raise exception.InvalidInput('Input rest_access is missing')
        self.rest_host = rest_access.get('host')
        self.rest_port = rest_access.get('port')
        self.rest_username = rest_access.get('username')
        self.rest_password = rest_access.get('password')
        # Lists of addresses to try, for authorization
        address = 'https://%(host)s:%(port)s/deviceManager/rest/' % \
            {'host': self.rest_host, 'port': str(self.rest_port)}
        self.san_address = [address]
        self.session = None
        self.url = None
        self.device_id = None
        self.verify = None
        urllib3.disable_warnings(InsecureRequestWarning)
        self.reset_connection(**kwargs)

    def reset_connection(self, **kwargs):
        self.verify = kwargs.get('verify', False)
        try:
            self.login()
        except Exception as ex:
            msg = "Failed to login to OceanStor: {}".format(ex)
            LOG.error(msg)
            raise exception.InvalidCredential(msg)

    def init_http_head(self):
        self.url = None
        self.session = requests.Session()
        self.session.headers.update({
            "Connection": "keep-alive",
            "Content-Type": "application/json"})
        if not self.verify:
            self.session.verify = False
        else:
            LOG.debug("Enable certificate verification, verify: {0}".format(
                self.verify))
            self.session.verify = self.verify
            self.session.mount("https://", HostNameIgnoreAdapter())
        self.session.trust_env = False

    def do_call(self, url, data, method,
                calltimeout=consts.SOCKET_TIMEOUT, log_filter_flag=False):
        """Send requests to Huawei storage server.

        Send HTTPS call, get response in JSON.
        Convert response into Python Object and return it.
        """
        if self.url:
            url = self.url + url

        kwargs = {'timeout': calltimeout}
        if data:
            kwargs['data'] = json.dumps(data)

        if method in ('POST', 'PUT', 'GET', 'DELETE'):
            func = getattr(self.session, method.lower())
        else:
            msg = _("Request method %s is invalid.") % method
            LOG.error(msg)
            raise exception.StorageBackendException(msg)

        try:
            res = func(url, **kwargs)
        except requests.exceptions.SSLError as e:
            LOG.error('SSLError exception from server: %(url)s.'
                      ' Error: %(err)s', {'url': url, 'err': e})
            err_str = six.text_type(e)
            if 'certificate verify failed' in err_str:
                raise exception.SSLCertificateFailed()
            else:
                raise exception.SSLHandshakeFailed()
        except Exception as err:
            LOG.exception('Bad response from server: %(url)s.'
                          ' Error: %(err)s', {'url': url, 'err': err})
            return {"error": {"code": consts.ERROR_CONNECT_TO_SERVER,
                              "description": "Connect to server error."}}

        try:
            res.raise_for_status()
        except requests.HTTPError as exc:
            return {"error": {"code": exc.response.status_code,
                              "description": six.text_type(exc)}}

        res_json = res.json()
        if not log_filter_flag:
            LOG.info('\n\n\n\nRequest URL: %(url)s\n\n'
                     'Call Method: %(method)s\n\n'
                     'Request Data: %(data)s\n\n'
                     'Response Data:%(res)s\n\n',
                     {'url': url, 'method': method,
                      'data': data, 'res': res_json})

        return res_json

    def login(self):
        """Login Huawei storage array."""
        device_id = None
        for item_url in self.san_address:
            url = item_url + "xx/sessions"
            data = {"username": self.rest_username,
                    "password": cryptor.decode(self.rest_password),
                    "scope": "0"}
            self.init_http_head()
            result = self.do_call(url, data, 'POST',
                                  calltimeout=consts.LOGIN_SOCKET_TIMEOUT,
                                  log_filter_flag=True)

            if (result['error']['code'] != 0) or ("data" not in result):
                LOG.error("Login error. URL: %(url)s\n"
                          "Reason: %(reason)s.",
                          {"url": item_url, "reason": result})
                continue

            LOG.debug('Login success: %(url)s', {'url': item_url})
            device_id = result['data']['deviceid']
            self.device_id = device_id
            self.url = item_url + device_id
            self.session.headers['iBaseToken'] = result['data']['iBaseToken']
            if (result['data']['accountstate']
                    in (consts.PWD_EXPIRED, consts.PWD_RESET)):
                self.logout()
                msg = _("Password has expired or has been reset, "
                        "please change the password.")
                LOG.error(msg)
                raise exception.StorageBackendException(msg)
            break

        if device_id is None:
            msg = _("Failed to login with all rest URLs.")
            LOG.error(msg)
            raise exception.StorageBackendException(msg)
        return device_id

    def call(self, url, data=None, method=None, log_filter_flag=False):
        """Send requests to server.

        If fail, try another RestURL.
        """
        device_id = None
        old_url = self.url
        result = self.do_call(url, data, method,
                              log_filter_flag=log_filter_flag)
        error_code = result['error']['code']
        if (error_code == consts.ERROR_CONNECT_TO_SERVER
                or error_code == consts.ERROR_UNAUTHORIZED_TO_SERVER):
            LOG.error("Can't open the recent url, relogin.")
            device_id = self.login()

        if device_id is not None:
            LOG.debug('Replace URL: \n'
                      'Old URL: %(old_url)s\n,'
                      'New URL: %(new_url)s\n.',
                      {'old_url': old_url, 'new_url': self.url})
            result = self.do_call(url, data, method,
                                  log_filter_flag=log_filter_flag)
            if result['error']['code'] in consts.RELOGIN_ERROR_PASS:
                result['error']['code'] = 0
        return result

    def paginated_call(self, url, data=None, method=None, params=None,
                       log_filter_flag=False,
                       page_size=consts.QUERY_PAGE_SIZE):
        if params:
            url = "{0}?{1}".format(url, params)
        else:
            url = "{0}?".format(url)
        result_list = []
        start, end = 0, page_size
        msg = _('Query resource volume error')
        while True:
            url_p = "{0}range=[{1}-{2}]".format(url, start, end)
            start, end = end, end + page_size
            result = self.call(url_p, data, method, log_filter_flag)
            self._assert_rest_result(result, msg)

            # Empty data if this is first page, OR last page got all data
            if 'data' not in result:
                break

            result_list.extend(result['data'])
            # Check if this is last page
            if len(result['data']) < page_size:
                break

        return result_list

    def logout(self):
        """Logout the session."""
        url = "/sessions"
        if self.url:
            result = self.do_call(url, None, "DELETE")
            self._assert_rest_result(result, _('Logout session error.'))

    def _assert_rest_result(self, result, err_str):
        if result['error']['code'] != 0:
            msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str,
                                                     'res': result})
            LOG.error(msg)
            raise exception.StorageBackendException(msg)

    def _assert_data_in_result(self, result, msg):
        if 'data' not in result:
            err_msg = _('%s "data" is not in result.') % msg
            LOG.error(err_msg)
            raise exception.StorageBackendException(err_msg)

    def get_storage(self):
        url = "/system/"
        result = self.call(url, method='GET', log_filter_flag=True)

        msg = _('Get storage error.')
        self._assert_rest_result(result, msg)
        self._assert_data_in_result(result, msg)
        return result['data']

    def get_all_controllers(self):
        url = "/controller"
        result = self.call(url, method='GET', log_filter_flag=True)

        msg = _('Get controller error.')
        self._assert_rest_result(result, msg)
        self._assert_data_in_result(result, msg)
        return result['data']

    def get_all_ports(self):
        url = "/fc_port"
        fc_ports = self.paginated_call(
            url, None, "GET", log_filter_flag=True)
        url = "/fcoe_port"
        fcoe_ports = self.paginated_call(
            url, None, "GET", log_filter_flag=True)
        url = "/eth_port"
        eth_ports = self.paginated_call(
            url, None, "GET", log_filter_flag=True)
        url = "/pcie_port"
        pcie_ports = self.paginated_call(
            url, None, "GET", log_filter_flag=True)
        url = "/bond_port"
        bond_ports = self.paginated_call(
            url, None, "GET", log_filter_flag=True)
        url = "/sas_port"
        sas_ports = self.paginated_call(
            url, None, "GET", log_filter_flag=True)
        return fc_ports + fcoe_ports + eth_ports\
            + pcie_ports + bond_ports + sas_ports

    def get_all_volumes(self):
        url = "/lun"
        return self.paginated_call(url, None, "GET", log_filter_flag=True)

    def get_all_disks(self):
        url = "/disk"
        return self.paginated_call(url, None, "GET", log_filter_flag=True)

    def get_all_pools(self):
        url = "/storagepool"
        return self.paginated_call(url, None, "GET", log_filter_flag=True)

    def get_all_filesystems(self):
        url = "/filesystem"
        return self.paginated_call(url, None, "GET", log_filter_flag=True)

    def get_all_qtrees(self, filesystems):
        url =
"/quotatree" qt_list = [] for fs in filesystems: params = "PARENTTYPE=40&PARENTID={0}&".format(fs['ID']) qt = self.paginated_call(url, None, "GET", params=params, log_filter_flag=True) qt_list.extend(qt) return qt_list def get_all_filesystem_quotas(self, fs_id): url = "/FS_QUOTA" params = "PARENTTYPE=40&PARENTID={0}&".format(fs_id) return self.paginated_call(url, None, "GET", params=params, log_filter_flag=True) def get_all_qtree_quotas(self, qt_id): url = "/FS_QUOTA" params = "PARENTTYPE=16445&PARENTID={0}&".format(qt_id) return self.paginated_call(url, None, "GET", params=params, log_filter_flag=True) def get_all_shares(self): url = "/CIFSHARE" cifs = self.paginated_call(url, None, "GET", log_filter_flag=True) url = "/NFSHARE" nfs = self.paginated_call(url, None, "GET", log_filter_flag=True) url = "/FTP_SHARE_AUTH_CLIENT" ftps = self.paginated_call(url, None, "GET", log_filter_flag=True) return cifs + nfs + ftps def get_all_mapping_views(self): url = "/mappingview" view = self.paginated_call(url, None, "GET", log_filter_flag=True) return view def get_all_associate_resources(self, url, obj_type, obj_id): params = "ASSOCIATEOBJTYPE={0}&ASSOCIATEOBJID={1}&".format(obj_type, obj_id) return self.paginated_call(url, None, "GET", params=params, log_filter_flag=True) def get_all_associate_mapping_views(self, obj_type, obj_id): url = "/mappingview/associate" return self.get_all_associate_resources(url, obj_type, obj_id) def get_all_associate_hosts(self, obj_type, obj_id): url = "/host/associate" return self.get_all_associate_resources(url, obj_type, obj_id) def get_all_associate_volumes(self, obj_type, obj_id): url = "/lun/associate" return self.get_all_associate_resources(url, obj_type, obj_id) def get_all_associate_ports(self, obj_type, obj_id): eth_ports = self.get_all_associate_resources( "/eth_port/associate", obj_type, obj_id) fc_ports = self.get_all_associate_resources( "/fc_port/associate", obj_type, obj_id) fcoe_ports = self.get_all_associate_resources( "/fcoe_port/associate", obj_type, obj_id) return eth_ports + fc_ports + fcoe_ports def get_all_hosts(self): url = "/host" host = self.paginated_call(url, None, "GET", log_filter_flag=True) return host def get_all_initiators(self): url = "/fc_initiator" fc_i = self.paginated_call(url, None, "GET", log_filter_flag=True) url = "/iscsi_initiator" iscsi_i = self.paginated_call(url, None, "GET", log_filter_flag=True) url = "/ib_initiator" ib_i = self.paginated_call(url, None, "GET", log_filter_flag=True) return fc_i + iscsi_i + ib_i def get_all_host_groups(self): url = "/hostgroup" hostg = self.paginated_call(url, None, "GET", log_filter_flag=True) return hostg def get_all_port_groups(self): url = "/portgroup" portg = self.paginated_call(url, None, "GET", log_filter_flag=True) return portg def get_all_volume_groups(self): url = "/lungroup" lungroup = self.paginated_call(url, None, "GET", log_filter_flag=True) return lungroup def clear_alert(self, sequence_number): url = "/alarm/currentalarm?sequence=%s" % sequence_number # Result always contains error code and description result = self.call(url, method="DELETE", log_filter_flag=True) if result['error']['code']: msg = 'Clear alert failed with reason: %s.' 
\ % result['error']['description'] raise exception.InvalidResults(msg) return result def list_alerts(self): url = "/alarm/currentalarm" result_list = self.paginated_call(url, None, "GET", log_filter_flag=True) return result_list def _get_performance_switch(self): url = "/performance_statistic_switch" result = self.call(url, method='GET', log_filter_flag=True) msg = _('Get performance_statistic_switch failed.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def _set_performance_switch(self, value): url = "/performance_statistic_switch" data = {"CMO_PERFORMANCE_SWITCH": value} result = self.call(url, data, method='PUT', log_filter_flag=True) msg = _('Set performance_statistic_switch failed.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def _get_performance_strategy(self): url = "/performance_statistic_strategy" result = self.call(url, method='GET', log_filter_flag=True) msg = _('Get performance_statistic_strategy failed.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def _set_performance_strategy(self, hist_enable=1, hist_duration=60, auto_stop=0, duration=5, max_duration=0): url = "/performance_statistic_strategy" data = { "CMO_STATISTIC_ARCHIVE_SWITCH": hist_enable, "CMO_STATISTIC_ARCHIVE_TIME": hist_duration, "CMO_STATISTIC_AUTO_STOP": auto_stop, "CMO_STATISTIC_INTERVAL": duration, "CMO_STATISTIC_MAX_TIME": max_duration } result = self.call(url, data, method='PUT', log_filter_flag=True) msg = _('Set performance_statistic_strategy failed.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def _get_metrics(self, resource_type, resource_id, metrics_ids): url = "/performace_statistic/cur_statistic_data" params = "CMO_STATISTIC_UUID={0}:{1}&CMO_STATISTIC_DATA_ID_LIST={2}&"\ "timeConversion=0&"\ .format(resource_type, resource_id, metrics_ids) return self.paginated_call(url, None, "GET", params=params, log_filter_flag=True) def enable_metrics_collection(self): return self._set_performance_switch('1') def disable_metrics_collection(self): return self._set_performance_switch('0') def configure_metrics_collection(self): self.disable_metrics_collection() self._set_performance_strategy(hist_enable=1, hist_duration=300, auto_stop=0, duration=60, max_duration=0) self.enable_metrics_collection() def get_pool_metrics(self, storage_id, selection): pools = self.get_all_pools() pool_metrics = [] select_metrics, select_ids = _get_selection(selection) for pool in pools: try: metrics = self._get_metrics(pool['TYPE'], pool['ID'], select_ids) for metric in metrics: data_list = metric['CMO_STATISTIC_DATA_LIST'].split(",") for index, key in enumerate(select_metrics): data = int(data_list[index]) if key in consts.CONVERT_TO_MILLI_SECOND_LIST: data = data * 1000 labels = { 'storage_id': storage_id, 'resource_type': 'pool', 'resource_id': pool['ID'], 'resource_name': pool['NAME'], 'type': 'RAW', 'unit': consts.POOL_CAP[key]['unit'] } values = _get_timestamp_values(metric, data) m = constants.metric_struct(name=key, labels=labels, values=values) pool_metrics.append(m) except Exception as ex: msg = "Failed to get metrics for pool:{0} error: {1}" \ .format(pool['NAME'], ex) LOG.error(msg) return pool_metrics def get_volume_metrics(self, storage_id, selection): volumes = self.get_all_volumes() volume_metrics = [] select_metrics, select_ids = _get_selection(selection) for volume in volumes: try: metrics = 
self._get_metrics(volume['TYPE'], volume['ID'], select_ids) for metric in metrics: data_list = metric['CMO_STATISTIC_DATA_LIST'].split(",") for index, key in enumerate(select_metrics): data = int(data_list[index]) if key in consts.CONVERT_TO_MILLI_SECOND_LIST: data = data * 1000 labels = { 'storage_id': storage_id, 'resource_type': 'volume', 'resource_id': volume['ID'], 'resource_name': volume['NAME'], 'type': 'RAW', 'unit': consts.VOLUME_CAP[key]['unit'] } values = _get_timestamp_values(metric, data) m = constants.metric_struct(name=key, labels=labels, values=values) volume_metrics.append(m) except Exception as ex: msg = "Failed to get metrics for volume:{0} error: {1}" \ .format(volume['NAME'], ex) LOG.error(msg) return volume_metrics def get_controller_metrics(self, storage_id, selection): controllers = self.get_all_controllers() controller_metrics = [] select_metrics, select_ids = _get_selection(selection) for controller in controllers: try: metrics = self._get_metrics(controller['TYPE'], controller['ID'], select_ids) for metric in metrics: data_list = metric['CMO_STATISTIC_DATA_LIST'].split(",") for index, key in enumerate(select_metrics): data = int(data_list[index]) if key in consts.CONVERT_TO_MILLI_SECOND_LIST: data = data * 1000 labels = { 'storage_id': storage_id, 'resource_type': 'controller', 'resource_id': controller['ID'], 'resource_name': controller['NAME'], 'type': 'RAW', 'unit': consts.CONTROLLER_CAP[key]['unit'] } values = _get_timestamp_values(metric, data) m = constants.metric_struct(name=key, labels=labels, values=values) controller_metrics.append(m) except Exception as ex: msg = "Failed to get metrics for controller:{0} error: {1}" \ .format(controller['NAME'], ex) LOG.error(msg) return controller_metrics def get_port_metrics(self, storage_id, selection): ports = self.get_all_ports() port_metrics = [] select_metrics, select_ids = _get_selection(selection) for port in ports: # ETH_PORT collection not supported if port['TYPE'] == 213: continue try: metrics = self._get_metrics(port['TYPE'], port['ID'], select_ids) for metric in metrics: data_list = metric['CMO_STATISTIC_DATA_LIST'].split(",") for index, key in enumerate(select_metrics): data = int(data_list[index]) if key in consts.CONVERT_TO_MILLI_SECOND_LIST: data = data * 1000 labels = { 'storage_id': storage_id, 'resource_type': 'port', 'resource_id': port['ID'], 'resource_name': port['NAME'], 'type': 'RAW', 'unit': consts.PORT_CAP[key]['unit'] } values = _get_timestamp_values(metric, data) m = constants.metric_struct(name=key, labels=labels, values=values) port_metrics.append(m) except Exception as ex: msg = "Failed to get metrics for port:{0} error: {1}" \ .format(port['NAME'], ex) LOG.error(msg) return port_metrics def get_disk_metrics(self, storage_id, selection): disks = self.get_all_disks() disk_metrics = [] select_metrics, select_ids = _get_selection(selection) for disk in disks: try: metrics = self._get_metrics(disk['TYPE'], disk['ID'], select_ids) for metric in metrics: data_list = metric['CMO_STATISTIC_DATA_LIST'].split(",") for index, key in enumerate(select_metrics): data = int(data_list[index]) if key in consts.CONVERT_TO_MILLI_SECOND_LIST: data = data * 1000 labels = { 'storage_id': storage_id, 'resource_type': 'disk', 'resource_id': disk['ID'], 'type': 'RAW', 'unit': consts.DISK_CAP[key]['unit'], 'resource_name': disk['MODEL'] + ':' + disk['SERIALNUMBER'] } values = _get_timestamp_values(metric, data) m = constants.metric_struct(name=key, labels=labels, values=values) disk_metrics.append(m) except 
Exception as ex:
                msg = "Failed to get metrics for disk:{0} error: {1}"\
                    .format(disk['ID'], ex)
                LOG.error(msg)
        return disk_metrics


================================================
FILE: delfin/drivers/ibm/__init__.py
================================================


================================================
FILE: delfin/drivers/ibm/ds8k/__init__.py
================================================


================================================
FILE: delfin/drivers/ibm/ds8k/alert_handler.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time

import six
from oslo_log import log

from delfin import exception
from delfin.common import alert_util
from delfin.common import constants

LOG = log.getLogger(__name__)


class AlertHandler(object):
    TIME_PATTERN = "%Y-%m-%dT%H:%M:%S%z"
    ALERT_LEVEL_MAP = {'error': constants.Severity.CRITICAL,
                       'warning': constants.Severity.WARNING,
                       'info': constants.Severity.INFORMATIONAL
                       }
    SECONDS_TO_MS = 1000

    def parse_queried_alerts(self, alert_model_list, alert_list, query_para):
        alerts = alert_list.get('data', {}).get('events')
        if alerts:
            for alert in alerts:
                try:
                    occur_time = int(time.mktime(time.strptime(
                        alert.get('time'),
                        self.TIME_PATTERN))) * AlertHandler.SECONDS_TO_MS
                    if not alert_util.is_alert_in_time_range(
                            query_para, occur_time):
                        continue
                    alert_model = {}
                    alert_model['alert_id'] = alert.get('type')
                    alert_model['alert_name'] = alert.get('description')
                    alert_model['severity'] = self.ALERT_LEVEL_MAP.get(
                        alert.get('severity'),
                        constants.Severity.INFORMATIONAL)
                    alert_model['description'] = alert.get('description')
                    alert_model['category'] = constants.Category.FAULT
                    alert_model['type'] = \
                        constants.EventType.EQUIPMENT_ALARM
                    alert_model['sequence_number'] = alert.get('id')
                    alert_model['occur_time'] = occur_time
                    alert_model['resource_type'] = \
                        constants.DEFAULT_RESOURCE_TYPE
                    alert_model_list.append(alert_model)
                except Exception as e:
                    LOG.error(e)
                    err_msg = "Failed to build alert model as some" \
                              " attributes missing in queried alerts: %s"\
                              % (six.text_type(e))
                    raise exception.InvalidResults(err_msg)


================================================
FILE: delfin/drivers/ibm/ds8k/consts.py
================================================
HOST_PORT_URL = '/api/v1/host_ports'
HOST_URL = '/api/v1/hosts'


================================================
FILE: delfin/drivers/ibm/ds8k/ds8k.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. import six from oslo_log import log from oslo_utils import units from delfin import exception from delfin.common import constants from delfin.drivers import driver from delfin.drivers.ibm.ds8k import rest_handler, alert_handler, consts LOG = log.getLogger(__name__) class DS8KDriver(driver.StorageDriver): PORT_TYPE_MAP = {'FC-AL': constants.PortType.FC, 'SCSI-FCP': constants.PortType.FC, 'FICON': constants.PortType.FICON } PORT_STATUS_MAP = { 'online': constants.PortHealthStatus.NORMAL, 'offline': constants.PortHealthStatus.ABNORMAL, 'fenced': constants.PortHealthStatus.UNKNOWN, 'quiescing': constants.PortHealthStatus.UNKNOWN } INITIATOR_STATUS_MAP = {'logged in': constants.InitiatorStatus.ONLINE, 'logged out': constants.InitiatorStatus.OFFLINE, 'unconfigured': constants.InitiatorStatus.UNKNOWN } def __init__(self, **kwargs): super().__init__(**kwargs) self.rest_handler = rest_handler.RestHandler(**kwargs) self.rest_handler.login() def reset_connection(self, context, **kwargs): self.rest_handler.logout() self.rest_handler.verify = kwargs.get('verify', False) self.rest_handler.login() def close_connection(self): self.rest_handler.logout() def get_storage(self, context): try: result = None system_info = self.rest_handler.get_rest_info('/api/v1/systems') if system_info: system_data = system_info.get('data', {}).get('systems', []) if system_data: for system in system_data: name = system.get('name') model = system.get('MTM') serial_number = system.get('sn') version = system.get('release') status = constants.StorageStatus.NORMAL if system.get('state') != 'online': status = constants.StorageStatus.ABNORMAL total = 0 free = 0 used = 0 raw = 0 if system.get('cap') != '' and \ system.get('cap') is not None: total = int(system.get('cap')) if system.get('capraw') != '' and \ system.get('capraw') is not None: raw = int(system.get('capraw')) if system.get('capalloc') != '' and \ system.get('capalloc') is not None: used = int(system.get('capalloc')) if system.get('capavail') != '' and \ system.get('capavail') is not None: free = int(system.get('capavail')) result = { 'name': name, 'vendor': 'IBM', 'model': model, 'status': status, 'serial_number': serial_number, 'firmware_version': version, 'location': '', 'total_capacity': total, 'raw_capacity': raw, 'used_capacity': used, 'free_capacity': free } break else: raise exception.StorageBackendException( "ds8k storage system info is None") else: raise exception.StorageBackendException( "ds8k storage system info is None") return result except Exception as err: err_msg = "Failed to get storage attributes from ds8k: %s" % \ (six.text_type(err)) raise exception.InvalidResults(err_msg) def list_storage_pools(self, context): pool_info = self.rest_handler.get_rest_info('/api/v1/pools') pool_list = [] status = constants.StoragePoolStatus.NORMAL if pool_info is not None: pool_data = pool_info.get('data', {}).get('pools', []) for pool in pool_data: if pool.get('stgtype') == 'fb': pool_type = constants.StorageType.BLOCK else: pool_type = constants.StorageType.FILE if (int(pool.get('capalloc')) / int(pool.get('cap'))) * 100 > \ int(pool.get('threshold')): status = constants.StoragePoolStatus.ABNORMAL pool_name = '%s_%s' % (pool.get('name'), pool.get('node')) pool_result = { 'name': pool_name, 'storage_id': self.storage_id, 'native_storage_pool_id': str(pool.get('id')), 'status': status, 'storage_type': pool_type, 'total_capacity': int(pool.get('cap')), 'used_capacity': 
int(pool.get('capalloc')), 'free_capacity': int(pool.get('capavail')) } pool_list.append(pool_result) return pool_list def list_volumes(self, context): volume_list = [] pool_list = self.rest_handler.get_rest_info('/api/v1/pools') if pool_list is not None: pool_data = pool_list.get('data', {}).get('pools', []) for pool in pool_data: url = '/api/v1/pools/%s/volumes' % pool.get('id') volumes = self.rest_handler.get_rest_info(url) if volumes is not None: vol_entries = volumes.get('data', {}).get('volumes', []) for volume in vol_entries: total = volume.get('cap') used = volume.get('capalloc') vol_type = constants.VolumeType.THICK if \ volume.get('stgtype') == 'fb' else \ constants.VolumeType.THIN status = constants.StorageStatus.NORMAL if \ volume.get('state') == 'normal' else\ constants.StorageStatus.ABNORMAL vol_name = '%s_%s' % (volume.get('name'), volume.get('id')) vol = { 'name': vol_name, 'storage_id': self.storage_id, 'description': '', 'status': status, 'native_volume_id': str(volume.get('id')), 'native_storage_pool_id': volume.get('pool').get('id'), 'wwn': '', 'type': vol_type, 'total_capacity': int(total), 'used_capacity': int(used), 'free_capacity': int(total) - int(used) } volume_list.append(vol) return volume_list def list_alerts(self, context, query_para=None): alert_model_list = [] alert_list = self.rest_handler.get_rest_info( '/api/v1/events?severity=warning,error') alert_handler.AlertHandler() \ .parse_queried_alerts(alert_model_list, alert_list, query_para) return alert_model_list @staticmethod def division_port_wwn(original_wwn): result_wwn = None if not original_wwn: return result_wwn is_first = True for i in range(0, len(original_wwn), 2): if is_first is True: result_wwn = '%s' % (original_wwn[i:i + 2]) is_first = False else: result_wwn = '%s:%s' % (result_wwn, original_wwn[i:i + 2]) return result_wwn def list_ports(self, context): port_list = [] port_info = self.rest_handler.get_rest_info('/api/v1/ioports') if port_info: port_data = port_info.get('data', {}).get('ioports', []) for port in port_data: status = DS8KDriver.PORT_STATUS_MAP.get( port.get('state'), constants.PortHealthStatus.UNKNOWN) speed = None connection_status = constants.PortConnectionStatus.CONNECTED\ if status == constants.PortHealthStatus.NORMAL \ else constants.PortConnectionStatus.DISCONNECTED if port.get('speed'): speed = int(port.get('speed').split(' ')[0]) * units.G port_result = { 'name': port.get('loc'), 'storage_id': self.storage_id, 'native_port_id': port.get('id'), 'location': port.get('loc'), 'connection_status': connection_status, 'health_status': status, 'type': DS8KDriver.PORT_TYPE_MAP.get( port.get('protocol'), constants.PortType.OTHER), 'logical_type': '', 'speed': speed, 'max_speed': speed, 'wwn': DS8KDriver.division_port_wwn(port.get('wwpn')) } port_list.append(port_result) return port_list def list_controllers(self, context): controller_list = [] controller_info = self.rest_handler.get_rest_info('/api/v1/nodes') if controller_info: contrl_data = controller_info.get('data', {}).get('nodes', []) for contrl in contrl_data: status = constants.ControllerStatus.NORMAL if \ contrl.get('state') == 'online' else \ constants.ControllerStatus.UNKNOWN controller_result = { 'name': contrl.get('id'), 'storage_id': self.storage_id, 'native_controller_id': contrl.get('id'), 'status': status } controller_list.append(controller_result) return controller_list def add_trap_config(self, context, trap_config): pass def remove_trap_config(self, context, trap_config): pass @staticmethod def 
parse_alert(context, alert): pass def clear_alert(self, context, alert): pass @staticmethod def get_access_url(): return 'https://{ip}:{port}' def list_storage_hosts(self, context): try: host_list = [] hosts = self.rest_handler.get_rest_info(consts.HOST_URL) if not hosts: return host_list host_entries = hosts.get('data', {}).get('hosts', []) for host in host_entries: status = constants.HostStatus.NORMAL if \ host.get('state') == 'online' else \ constants.HostStatus.OFFLINE os_type = constants.HostOSTypes.VMWARE_ESX if \ host.get('hosttype') == 'VMware' else \ constants.HostOSTypes.UNKNOWN host_result = { "name": host.get('name'), "storage_id": self.storage_id, "native_storage_host_id": host.get('name'), "os_type": os_type, "status": status } host_list.append(host_result) return host_list except Exception as e: LOG.error("Failed to get hosts from ds8k") raise e def list_masking_views(self, context): try: view_list = [] hosts = self.rest_handler.get_rest_info(consts.HOST_URL) if not hosts: return view_list host_entries = hosts.get('data', {}).get('hosts', []) for host in host_entries: view_url = '%s/%s/mappings' % (consts.HOST_URL, host.get('name')) views = self.rest_handler.get_rest_info(view_url) if not views: continue view_entries = views.get('data', {}).get('mappings', []) for view in view_entries: view_id = '%s_%s' % (view.get('lunid'), host.get('name')) view_result = { "name": view_id, "native_storage_host_id": host.get('name'), "storage_id": self.storage_id, "native_volume_id": view.get('volume', {}).get('id'), "native_masking_view_id": view_id, } view_list.append(view_result) return view_list except Exception as e: LOG.error("Failed to get views from ds8k") raise e def list_storage_host_initiators(self, context): try: initiator_list = [] host_ports = self.rest_handler.get_rest_info(consts.HOST_PORT_URL) if not host_ports: return initiator_list port_entries = host_ports.get('data', {}).get('host_ports', []) for port in port_entries: status = DS8KDriver.INITIATOR_STATUS_MAP.get(port.get('state')) init_result = { "name": port.get('wwpn'), "storage_id": self.storage_id, "native_storage_host_initiator_id": port.get('wwpn'), "wwn": port.get('wwpn'), "status": status, "type": constants.InitiatorType.UNKNOWN, "native_storage_host_id": port.get('host', {}).get('name') } initiator_list.append(init_result) return initiator_list except Exception as e: LOG.error("Failed to get initiators from ds8k") raise e ================================================ FILE: delfin/drivers/ibm/ds8k/rest_handler.py ================================================ # Copyright 2021 The SODA Authors. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
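# --- Illustrative note (not part of the original file) ----------------------
# A minimal usage sketch for the DS8000 RestHandler defined below, assuming
# delfin's base RestClient accepts the same 'rest' kwargs shape as the other
# drivers in this repository (the concrete values are hypothetical):
#
#     handler = RestHandler(rest={'host': '192.168.0.200', 'port': 8452,
#                                 'username': 'admin',
#                                 'password': cryptor.encode('secret')})
#     handler.login()        # POST /api/v1/tokens, stores the session token
#     systems = handler.get_rest_info('/api/v1/systems')
#     handler.logout()
#
# Note that call() transparently re-logins on HTTP 401, and that the
# X-Auth-Token header is kept cryptor-encoded between calls; call_with_token()
# decodes it only for the duration of a single request.
# -----------------------------------------------------------------------------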
import threading import six from oslo_log import log as logging from delfin import cryptor from delfin import exception from delfin.drivers.utils.rest_client import RestClient LOG = logging.getLogger(__name__) class RestHandler(RestClient): REST_TOKEN_URL = '/api/v1/tokens' def __init__(self, **kwargs): self.session_lock = threading.Lock() super(RestHandler, self).__init__(**kwargs) def call_with_token(self, url, data, method): auth_key = None if self.session: auth_key = self.session.headers.get('X-Auth-Token', None) if auth_key: self.session.headers['X-Auth-Token'] \ = cryptor.decode(auth_key) res = self.do_call(url, data, method) if auth_key: self.session.headers['X-Auth-Token'] = auth_key return res def call(self, url, data=None, method=None): try: res = self.call_with_token(url, data, method) if res.status_code == 401: LOG.error("Failed to get token,status_code:%s,error_mesg:%s" % (res.status_code, res.text)) self.login() res = self.call_with_token(url, data, method) elif res.status_code == 503: raise exception.InvalidResults(res.text) return res except Exception as e: LOG.error("Method:%s,url:%s failed: %s" % (method, url, six.text_type(e))) raise e def get_rest_info(self, url, data=None, method='GET'): result_json = None res = self.call(url, data, method) if res.status_code == 200: result_json = res.json() return result_json def login(self): try: data = { 'request': { 'params': { "username": self.rest_username, "password": cryptor.decode(self.rest_password) } } } with self.session_lock: if self.session is None: self.init_http_head() res = self.call_with_token( RestHandler.REST_TOKEN_URL, data, 'POST') if res.status_code == 200: result = res.json() self.session.headers['X-Auth-Token'] = \ cryptor.encode(result.get('token').get('token')) else: LOG.error("Login error. URL: %(url)s,Reason: %(reason)s.", {"url": RestHandler.REST_TOKEN_URL, "reason": res.text}) if 'Authentication has failed' in res.text: raise exception.InvalidUsernameOrPassword() else: raise exception.StorageBackendException(res.text) except Exception as e: LOG.error("Login error: %s", six.text_type(e)) raise e finally: data = None def logout(self): try: if self.session: self.session.close() except Exception as e: err_msg = "Logout error: %s" % (six.text_type(e)) LOG.error(err_msg) raise e ================================================ FILE: delfin/drivers/ibm/storwize_svc/__init__.py ================================================ ================================================ FILE: delfin/drivers/ibm/storwize_svc/consts.py ================================================ # Copyright 2020 The SODA Authors. # Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
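# --- Illustrative note (not part of the original file) ----------------------
# The *_CAP dictionaries below declare the performance metrics supported per
# resource type, keyed by metric name with a unit/description payload, e.g.:
#
#     VOLUME_CAP['iops']['unit']          # -> 'IOPS'
#     DISK_CAP['responseTime']['unit']    # -> 'ms'
#
# ssh_handler.py (further below in this dump) looks these units up when it
# packages counter deltas into delfin's constants.metric_struct samples.
# -----------------------------------------------------------------------------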
LOCAL_FILE_PATH = '/delfin/drivers/utils/performance_file/svc/' REMOTE_FILE_PATH = '/dumps/iostats/' IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Input/output operations per second" } READ_IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Read input/output operations per second" } WRITE_IOPS_DESCRIPTION = { "unit": "IOPS", "description": "Write input/output operations per second" } THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data is " "successfully transferred in MB/s" } READ_THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data read is " "successfully transferred in MB/s" } WRITE_THROUGHPUT_DESCRIPTION = { "unit": "MB/s", "description": "Represents how much data write is " "successfully transferred in MB/s" } RESPONSE_TIME_DESCRIPTION = { "unit": "ms", "description": "Average time taken for an IO " "operation in ms" } CACHE_HIT_RATIO_DESCRIPTION = { "unit": "%", "description": "Percentage of io that are cache hits" } READ_CACHE_HIT_RATIO_DESCRIPTION = { "unit": "%", "description": "Percentage of read ops that are cache hits" } WRITE_CACHE_HIT_RATIO_DESCRIPTION = { "unit": "%", "description": "Percentage of write ops that are cache hits" } IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of IO requests in KB" } READ_IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of read IO requests in KB" } WRITE_IO_SIZE_DESCRIPTION = { "unit": "KB", "description": "The average size of write IO requests in KB" } CPU_USAGE_DESCRIPTION = { "unit": "%", "description": "Percentage of CPU usage" } MEMORY_USAGE_DESCRIPTION = { "unit": "%", "description": "Percentage of DISK memory usage in percentage" } SERVICE_TIME = { "unit": 'ms', "description": "Service time of the resource in ms" } VOLUME_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, "cacheHitRatio": CACHE_HIT_RATIO_DESCRIPTION, "readCacheHitRatio": READ_CACHE_HIT_RATIO_DESCRIPTION, "writeCacheHitRatio": WRITE_CACHE_HIT_RATIO_DESCRIPTION, "ioSize": IO_SIZE_DESCRIPTION, "readIoSize": READ_IO_SIZE_DESCRIPTION, "writeIoSize": WRITE_IO_SIZE_DESCRIPTION } PORT_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION } DISK_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, } CONTROLLER_CAP = { "iops": IOPS_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, "throughput": THROUGHPUT_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, } ================================================ FILE: delfin/drivers/ibm/storwize_svc/ssh_handler.py ================================================ # Copyright 2020 The SODA Authors. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import time from itertools import islice import paramiko import six from oslo_log import log as logging from oslo_utils import units from delfin import exception, utils from delfin.common import constants, alert_util from delfin.drivers.ibm.storwize_svc import consts from delfin.drivers.utils.ssh_client import SSHPool from delfin.drivers.utils.tools import Tools LOG = logging.getLogger(__name__) class SSHHandler(object): OID_ERR_ID = '1.3.6.1.4.1.2.6.190.4.3' OID_SEQ_NUMBER = '1.3.6.1.4.1.2.6.190.4.9' OID_LAST_TIME = '1.3.6.1.4.1.2.6.190.4.10' OID_OBJ_TYPE = '1.3.6.1.4.1.2.6.190.4.11' OID_OBJ_NAME = '1.3.6.1.4.1.2.6.190.4.17' OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0' TRAP_SEVERITY_MAP = { '1.3.6.1.4.1.2.6.190.1': constants.Severity.CRITICAL, '1.3.6.1.4.1.2.6.190.2': constants.Severity.WARNING, '1.3.6.1.4.1.2.6.190.3': constants.Severity.INFORMATIONAL, } SEVERITY_MAP = {"warning": "Warning", "informational": "Informational", "error": "Major" } CONTRL_STATUS_MAP = {"online": constants.ControllerStatus.NORMAL, "offline": constants.ControllerStatus.OFFLINE, "service": constants.ControllerStatus.NORMAL, "flushing": constants.ControllerStatus.UNKNOWN, "pending": constants.ControllerStatus.UNKNOWN, "adding": constants.ControllerStatus.UNKNOWN, "deleting": constants.ControllerStatus.UNKNOWN } DISK_PHYSICAL_TYPE = { 'fc': constants.DiskPhysicalType.FC, 'sas_direct': constants.DiskPhysicalType.SAS } DISK_STATUS_MAP = { 'online': constants.DiskStatus.NORMAL, 'offline': constants.DiskStatus.OFFLINE, 'excluded': constants.DiskStatus.ABNORMAL, 'degraded_paths': constants.DiskStatus.DEGRADED, 'degraded_ports': constants.DiskStatus.DEGRADED, 'degraded': constants.DiskStatus.DEGRADED } VOLUME_PERF_METRICS = { 'readIops': 'ro', 'writeIops': 'wo', 'readThroughput': 'rb', 'writeThroughput': 'wb', 'readIoSize': 'rb', 'writeIoSize': 'wb', 'responseTime': 'res_time', 'throughput': 'tb', 'iops': 'to', 'ioSize': 'tb', 'cacheHitRatio': 'hrt', 'readCacheHitRatio': 'rhr', 'writeCacheHitRatio': 'whr' } DISK_PERF_METRICS = { 'readIops': 'ro', 'writeIops': 'wo', 'readThroughput': 'rb', 'writeThroughput': 'wb', 'responseTime': 'res_time', 'throughput': 'tb', 'iops': 'to' } CONTROLLER_PERF_METRICS = { 'readIops': 'ro', 'writeIops': 'wo', 'readThroughput': 'rb', 'writeThroughput': 'wb', 'responseTime': 'res_time', 'throughput': 'tb', 'iops': 'to' } PORT_PERF_METRICS = { 'readIops': 'ro', 'writeIops': 'wo', 'readThroughput': 'rb', 'writeThroughput': 'wb', 'throughput': 'tb', 'responseTime': 'res_time', 'iops': 'to' } TARGET_RESOURCE_RELATION = { constants.ResourceType.DISK: 'mdsk', constants.ResourceType.VOLUME: 'vdsk', constants.ResourceType.PORT: 'port', constants.ResourceType.CONTROLLER: 'node' } RESOURCE_PERF_MAP = { constants.ResourceType.DISK: DISK_PERF_METRICS, constants.ResourceType.VOLUME: VOLUME_PERF_METRICS, constants.ResourceType.PORT: PORT_PERF_METRICS, constants.ResourceType.CONTROLLER: CONTROLLER_PERF_METRICS } SECONDS_TO_MS = 1000 ALERT_NOT_FOUND_CODE = 
'CMMVC8275E' BLOCK_SIZE = 512 BYTES_TO_BIT = 8 OS_TYPE_MAP = {'generic': constants.HostOSTypes.UNKNOWN, 'hpux': constants.HostOSTypes.HP_UX, 'openvms': constants.HostOSTypes.OPEN_VMS, 'tpgs': constants.HostOSTypes.UNKNOWN, 'vvol': constants.HostOSTypes.UNKNOWN } INITIATOR_STATUS_MAP = {'active': constants.InitiatorStatus.ONLINE, 'offline': constants.InitiatorStatus.OFFLINE, 'inactive': constants.InitiatorStatus.ONLINE } HOST_STATUS_MAP = {'online': constants.HostStatus.NORMAL, 'offline': constants.HostStatus.OFFLINE, 'degraded': constants.HostStatus.DEGRADED, 'mask': constants.HostStatus.NORMAL, } def __init__(self, **kwargs): self.ssh_pool = SSHPool(**kwargs) @staticmethod def handle_split(split_str, split_char, arr_number): split_value = '' if split_str is not None and split_str != '': tmp_value = split_str.split(split_char, 1) if arr_number == 1 and len(tmp_value) > 1: split_value = tmp_value[arr_number].strip() elif arr_number == 0: split_value = tmp_value[arr_number].strip() return split_value @staticmethod def parse_alert(alert): try: alert_model = dict() alert_name = SSHHandler.handle_split(alert.get( SSHHandler.OID_ERR_ID), ':', 1) error_info = SSHHandler.handle_split(alert.get( SSHHandler.OID_ERR_ID), ':', 0) alert_id = SSHHandler.handle_split(error_info, '=', 1) severity = SSHHandler.TRAP_SEVERITY_MAP.get( alert.get(SSHHandler.OID_SEVERITY), constants.Severity.INFORMATIONAL ) alert_model['alert_id'] = str(alert_id) alert_model['alert_name'] = alert_name alert_model['severity'] = severity alert_model['category'] = constants.Category.FAULT alert_model['type'] = constants.EventType.EQUIPMENT_ALARM alert_model['sequence_number'] = SSHHandler. \ handle_split(alert.get(SSHHandler.OID_SEQ_NUMBER), '=', 1) timestamp = SSHHandler. \ handle_split(alert.get(SSHHandler.OID_LAST_TIME), '=', 1) time_type = '%a %b %d %H:%M:%S %Y' occur_time = int(time.mktime(time.strptime( timestamp, time_type))) alert_model['occur_time'] = int(occur_time * SSHHandler. 
SECONDS_TO_MS) alert_model['description'] = alert_name alert_model['resource_type'] = SSHHandler.handle_split( alert.get(SSHHandler.OID_OBJ_TYPE), '=', 1) alert_model['location'] = SSHHandler.handle_split(alert.get( SSHHandler.OID_OBJ_NAME), '=', 1) return alert_model except Exception as e: LOG.error(e) msg = ("Failed to build alert model as some attributes missing " "in alert message:%s.") % (six.text_type(e)) raise exception.InvalidResults(msg) def login(self): try: with self.ssh_pool.item() as ssh: result = SSHHandler.do_exec('lssystem', ssh) if 'is not a recognized command' in result: raise exception.InvalidIpOrPort() except Exception as e: LOG.error("Failed to login ibm storwize_svc %s" % (six.text_type(e))) raise e @staticmethod def do_exec(command_str, ssh): """Execute command""" try: utils.check_ssh_injection(command_str.split()) if command_str is not None and ssh is not None: stdin, stdout, stderr = ssh.exec_command(command_str) res, err = stdout.read(), stderr.read() re = res if res else err result = re.decode() except paramiko.AuthenticationException as ae: LOG.error('doexec Authentication error:{}'.format(ae)) raise exception.InvalidUsernameOrPassword() except Exception as e: err = six.text_type(e) LOG.error('doexec InvalidUsernameOrPassword error') if 'timed out' in err: raise exception.SSHConnectTimeout() elif 'No authentication methods available' in err \ or 'Authentication failed' in err: raise exception.InvalidUsernameOrPassword() elif 'not a valid RSA private key file' in err: raise exception.InvalidPrivateKey() else: raise exception.SSHException(err) return result def exec_ssh_command(self, command): try: with self.ssh_pool.item() as ssh: ssh_info = SSHHandler.do_exec(command, ssh) return ssh_info except Exception as e: msg = "Failed to ssh ibm storwize_svc %s: %s" % \ (command, six.text_type(e)) raise exception.SSHException(msg) def change_capacity_to_bytes(self, unit): unit = unit.upper() if unit == 'TB': result = units.Ti elif unit == 'GB': result = units.Gi elif unit == 'MB': result = units.Mi elif unit == 'KB': result = units.Ki else: result = 1 return int(result) def parse_string(self, value): capacity = 0 if value: if value.isdigit(): capacity = float(value) else: unit = value[-2:] capacity = float(value[:-2]) * int( self.change_capacity_to_bytes(unit)) return capacity def get_storage(self): try: system_info = self.exec_ssh_command('lssystem') storage_map = {} self.handle_detail(system_info, storage_map, split=' ') serial_number = storage_map.get('id') status = 'normal' if storage_map.get('statistics_status') == 'on' \ else 'offline' location = storage_map.get('location') free_capacity = self.parse_string(storage_map.get( 'total_free_space')) used_capacity = self.parse_string(storage_map.get( 'total_used_capacity')) raw_capacity = self.parse_string(storage_map.get( 'total_mdisk_capacity')) subscribed_capacity = self.parse_string(storage_map.get( 'virtual_capacity')) total_capacity = int(free_capacity + used_capacity) if total_capacity > raw_capacity: raw_capacity = total_capacity firmware_version = '' if storage_map.get('code_level') is not None: firmware_version = storage_map.get('code_level').split(' ')[0] s = { 'name': storage_map.get('name'), 'vendor': 'IBM', 'model': storage_map.get('product_name'), 'status': status, 'serial_number': serial_number, 'firmware_version': firmware_version, 'location': location, 'total_capacity': total_capacity, 'raw_capacity': int(raw_capacity), 'subscribed_capacity': int(subscribed_capacity), 'used_capacity': int(used_capacity), 
'free_capacity': int(free_capacity) } return s except exception.DelfinException as e: err_msg = "Failed to get storage: %s" % (six.text_type(e.msg)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def handle_detail(self, deltail_info, detail_map, split): detail_arr = deltail_info.split('\n') for detail in detail_arr: if detail is not None and detail != '': strinfo = detail.split(split, 1) key = strinfo[0] value = '' if len(strinfo) > 1: value = strinfo[1] detail_map[key] = value def list_storage_pools(self, storage_id): try: pool_list = [] pool_info = self.exec_ssh_command('lsmdiskgrp') pool_res = pool_info.split('\n') for i in range(1, len(pool_res)): if pool_res[i] is None or pool_res[i] == '': continue pool_str = ' '.join(pool_res[i].split()) strinfo = pool_str.split(' ') detail_command = 'lsmdiskgrp %s' % strinfo[0] deltail_info = self.exec_ssh_command(detail_command) pool_map = {} self.handle_detail(deltail_info, pool_map, split=' ') status = 'normal' if pool_map.get('status') == 'online' \ else 'offline' total_cap = self.parse_string(pool_map.get('capacity')) free_cap = self.parse_string(pool_map.get('free_capacity')) used_cap = self.parse_string(pool_map.get('used_capacity')) subscribed_capacity = self.parse_string(pool_map.get( 'virtual_capacity')) p = { 'name': pool_map.get('name'), 'storage_id': storage_id, 'native_storage_pool_id': pool_map.get('id'), 'description': '', 'status': status, 'storage_type': constants.StorageType.BLOCK, 'subscribed_capacity': int(subscribed_capacity), 'total_capacity': int(total_cap), 'used_capacity': int(used_cap), 'free_capacity': int(free_cap) } pool_list.append(p) return pool_list except exception.DelfinException as e: err_msg = "Failed to get storage pool: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage pool: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def list_volumes(self, storage_id): try: volume_list = [] volume_info = self.exec_ssh_command('lsvdisk') volume_res = volume_info.split('\n') for i in range(1, len(volume_res)): if volume_res[i] is None or volume_res[i] == '': continue volume_str = ' '.join(volume_res[i].split()) strinfo = volume_str.split(' ') volume_id = strinfo[0] detail_command = 'lsvdisk -delim : %s' % volume_id deltail_info = self.exec_ssh_command(detail_command) volume_map = {} self.handle_detail(deltail_info, volume_map, split=':') status = 'normal' if volume_map.get('status') == 'online' \ else 'offline' volume_type = 'thin' if volume_map.get('se_copy') == 'yes' \ else 'thick' total_capacity = self.parse_string(volume_map.get('capacity')) free_capacity = self.parse_string(volume_map. get('free_capacity')) used_capacity = self.parse_string(volume_map. 
get('used_capacity')) compressed = True deduplicated = True if volume_map.get('compressed_copy') == 'no': compressed = False if volume_map.get('deduplicated_copy') == 'no': deduplicated = False v = { 'name': volume_map.get('name'), 'storage_id': storage_id, 'description': '', 'status': status, 'native_volume_id': str(volume_map.get('id')), 'native_storage_pool_id': volume_map.get('mdisk_grp_id'), 'wwn': str(volume_map.get('vdisk_UID')), 'type': volume_type, 'total_capacity': int(total_capacity), 'used_capacity': int(used_capacity), 'free_capacity': int(free_capacity), 'compressed': compressed, 'deduplicated': deduplicated } volume_list.append(v) return volume_list except exception.DelfinException as e: err_msg = "Failed to get storage volume: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage volume: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def list_alerts(self, query_para): try: alert_list = [] alert_info = self.exec_ssh_command('lseventlog -monitoring yes ' '-message no') alert_res = alert_info.split('\n') for i in range(1, len(alert_res)): if alert_res[i] is None or alert_res[i] == '': continue alert_str = ' '.join(alert_res[i].split()) strinfo = alert_str.split(' ', 1) detail_command = 'lseventlog %s' % strinfo[0] deltail_info = self.exec_ssh_command(detail_command) alert_map = {} self.handle_detail(deltail_info, alert_map, split=' ') occur_time = int(alert_map.get('last_timestamp_epoch')) * \ self.SECONDS_TO_MS if not alert_util.is_alert_in_time_range(query_para, occur_time): continue alert_name = alert_map.get('event_id_text', '') event_id = alert_map.get('event_id') location = alert_map.get('object_name', '') resource_type = alert_map.get('object_type', '') severity = self.SEVERITY_MAP.get(alert_map. 
get('notification_type')) if severity == 'Informational' or severity is None: continue alert_model = { 'alert_id': event_id, 'alert_name': alert_name, 'severity': severity, 'category': constants.Category.FAULT, 'type': 'EquipmentAlarm', 'sequence_number': alert_map.get('sequence_number'), 'occur_time': occur_time, 'description': alert_name, 'resource_type': resource_type, 'location': location } alert_list.append(alert_model) return alert_list except exception.DelfinException as e: err_msg = "Failed to get storage alert: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage alert: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def fix_alert(self, alert): command_line = 'cheventlog -fix %s' % alert result = self.exec_ssh_command(command_line) if result: if self.ALERT_NOT_FOUND_CODE not in result: raise exception.InvalidResults(six.text_type(result)) LOG.warning("Alert %s doesn't exist.", alert) def list_controllers(self, storage_id): try: controller_list = [] controller_cmd = 'lsnode' control_info = self.exec_ssh_command(controller_cmd) if 'command not found' in control_info: controller_cmd = 'lsnodecanister' control_info = self.exec_ssh_command(controller_cmd) control_res = control_info.split('\n') for i in range(1, len(control_res)): if control_res[i] is None or control_res[i] == '': continue control_str = ' '.join(control_res[i].split()) str_info = control_str.split(' ') control_id = str_info[0] detail_command = '%s %s' % (controller_cmd, control_id) deltail_info = self.exec_ssh_command(detail_command) control_map = {} self.handle_detail(deltail_info, control_map, split=' ') cpu_map = {} cpu_cmd = 'lsnodehw -delim , %s' % control_id cpu_info = self.exec_ssh_command(cpu_cmd) if 'command not found' in cpu_info: cpu_cmd = 'lsnodecanisterhw -delim , %s' % control_id cpu_info = self.exec_ssh_command(cpu_cmd) self.handle_detail(cpu_info, cpu_map, split=',') cpu_actual = cpu_map.get('cpu_actual') cpu_count = cpu_map.get('cpu_count') status = SSHHandler.CONTRL_STATUS_MAP.get( control_map.get('status'), constants.ControllerStatus.UNKNOWN) controller_result = { 'name': control_map.get('name'), 'storage_id': storage_id, 'native_controller_id': control_map.get('id'), 'status': status, 'soft_version': control_map.get('code_level', '').split(' ')[0], 'location': control_map.get('name'), 'cpu_info': cpu_actual, 'cpu_count': int(cpu_count) } controller_list.append(controller_result) return controller_list except Exception as err: err_msg = "Failed to get controller attributes from Storwize: %s"\ % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def list_disks(self, storage_id): try: disk_list = [] disk_info = self.exec_ssh_command('lsmdisk') disk_res = disk_info.split('\n') for i in range(1, len(disk_res)): if disk_res[i] is None or disk_res[i] == '': continue control_str = ' '.join(disk_res[i].split()) str_info = control_str.split(' ') disk_id = str_info[0] detail_command = 'lsmdisk %s' % disk_id deltail_info = self.exec_ssh_command(detail_command) disk_map = {} self.handle_detail(deltail_info, disk_map, split=' ') status = SSHHandler.DISK_STATUS_MAP.get( disk_map.get('status'), constants.DiskStatus.ABNORMAL) physical_type = SSHHandler.DISK_PHYSICAL_TYPE.get( disk_map.get('fabric_type'), constants.DiskPhysicalType.UNKNOWN) location = '%s_%s' % (disk_map.get('controller_name'), disk_map.get('name')) disk_result = { 'name': disk_map.get('name'), 'storage_id': storage_id, 
'native_disk_id': disk_map.get('id'), 'capacity': int(self.parse_string( disk_map.get('capacity'))), 'status': status, 'physical_type': physical_type, 'native_disk_group_id': disk_map.get('mdisk_grp_name'), 'location': location } disk_list.append(disk_result) return disk_list except Exception as err: err_msg = "Failed to get disk attributes from Storwize: %s" % \ (six.text_type(err)) raise exception.InvalidResults(err_msg) def get_fc_port(self, storage_id): port_list = [] fc_info = self.exec_ssh_command('lsportfc') fc_res = fc_info.split('\n') for i in range(1, len(fc_res)): if fc_res[i] is None or fc_res[i] == '': continue control_str = ' '.join(fc_res[i].split()) str_info = control_str.split(' ') port_id = str_info[0] detail_command = 'lsportfc %s' % port_id deltail_info = self.exec_ssh_command(detail_command) port_map = {} self.handle_detail(deltail_info, port_map, split=' ') status = constants.PortHealthStatus.NORMAL conn_status = constants.PortConnectionStatus.CONNECTED if port_map.get('status') != 'active': status = constants.PortHealthStatus.ABNORMAL conn_status = constants.PortConnectionStatus.DISCONNECTED port_type = constants.PortType.FC if port_map.get('type') == 'ethernet': port_type = constants.PortType.ETH location = '%s_%s' % (port_map.get('node_name'), port_map.get('id')) speed = None if port_map.get('port_speed')[:-2].isdigit(): speed = int(self.handle_port_bps( port_map.get('port_speed'), 'fc')) port_result = { 'name': location, 'storage_id': storage_id, 'native_port_id': port_map.get('id'), 'location': location, 'connection_status': conn_status, 'health_status': status, 'type': port_type, 'speed': speed, 'native_parent_id': port_map.get('node_name'), 'wwn': port_map.get('WWPN') } port_list.append(port_result) return port_list def get_iscsi_port(self, storage_id): port_list = [] for i in range(1, 3): port_array = [] port_command = 'lsportip %s' % i port_info = self.exec_ssh_command(port_command) port_arr = port_info.split('\n') port_map = {} for detail in port_arr: if detail is not None and detail != '': strinfo = detail.split(' ', 1) key = strinfo[0] value = '' if len(strinfo) > 1: value = strinfo[1] port_map[key] = value else: if len(port_map) > 1: port_array.append(port_map) port_map = {} continue for port in port_array: if port.get('failover') == 'yes': continue status = constants.PortHealthStatus.ABNORMAL if port.get('state') == 'online': status = constants.PortHealthStatus.NORMAL conn_status = constants.PortConnectionStatus.DISCONNECTED if port.get('link_state') == 'active': conn_status = constants.PortConnectionStatus.CONNECTED port_type = constants.PortType.ETH location = '%s_%s' % (port.get('node_name'), port.get('id')) port_result = { 'name': location, 'storage_id': storage_id, 'native_port_id': location, 'location': location, 'connection_status': conn_status, 'health_status': status, 'type': port_type, 'speed': int(self.handle_port_bps( port.get('speed'), 'eth')), 'native_parent_id': port.get('node_name'), 'mac_address': port.get('MAC'), 'ipv4': port.get('IP_address'), 'ipv4_mask': port.get('mask'), 'ipv6': port.get('IP_address_6') } port_list.append(port_result) return port_list @staticmethod def change_speed_to_bytes(unit): unit = unit.upper() if unit == 'TB': result = units.T elif unit == 'GB': result = units.G elif unit == 'MB': result = units.M elif unit == 'KB': result = units.k else: result = 1 return int(result) def handle_port_bps(self, value, port_type): speed = 0 if value: if value.isdigit(): speed = float(value) else: if port_type == 'fc': unit = 
    def handle_port_bps(self, value, port_type):
        speed = 0
        if value:
            if value.isdigit():
                speed = float(value)
            else:
                if port_type == 'fc':
                    unit = value[-2:]
                    speed = float(value[:-2]) * int(
                        self.change_speed_to_bytes(unit))
                else:
                    unit = value[-4:-2]
                    speed = float(value[:-4]) * int(
                        self.change_speed_to_bytes(unit))
        return speed

    def list_ports(self, storage_id):
        try:
            port_list = []
            port_list.extend(self.get_fc_port(storage_id))
            port_list.extend(self.get_iscsi_port(storage_id))
            return port_list
        except Exception as err:
            err_msg = "Failed to get ports attributes from Storwize: %s" % \
                      (six.text_type(err))
            raise exception.InvalidResults(err_msg)

    @staticmethod
    def handle_stats_filename(file_name, file_map):
        name_arr = file_name.split('_')
        file_type = '%s_%s_%s' % (name_arr[0], name_arr[1], name_arr[2])
        file_time = '20%s%s' % (name_arr[3], name_arr[4])
        time_pattern = '%Y%m%d%H%M%S'
        tools = Tools()
        occur_time = tools.time_str_to_timestamp(file_time, time_pattern)
        if file_map.get(file_type):
            file_map[file_type][occur_time] = file_name
        else:
            file_map[file_type] = {occur_time: file_name}

    def get_stats_filelist(self, file_map):
        stats_file_command = 'lsdumps -prefix /dumps/iostats'
        file_list = self.exec_ssh_command(stats_file_command)
        file_line = file_list.split('\n')
        for file in islice(file_line, 1, None):
            if file:
                file_arr = ' '.join(file.split()).split(' ')
                if len(file_arr) > 1:
                    file_name = file_arr[1]
                    SSHHandler.handle_stats_filename(file_name, file_map)
        for file_stats in file_map:
            file_map[file_stats] = sorted(file_map.get(file_stats).items(),
                                          key=lambda x: x[0], reverse=False)

    def package_data(self, storage_id, resource_type, metrics, metric_map):
        resource_id = None
        resource_name = None
        unit = None
        for resource_info in metric_map:
            if resource_type == constants.ResourceType.PORT:
                port_info = self.get_fc_port(storage_id)
                if port_info:
                    for fc_port in port_info:
                        if resource_info.strip('0x').upper() == fc_port.get(
                                'wwn').upper():
                            resource_id = fc_port.get('native_port_id')
                            resource_name = fc_port.get('name')
                            break
            else:
                resource_arr = resource_info.split('_')
                resource_id = resource_arr[0]
                resource_name = resource_arr[1]
            for target in metric_map.get(resource_info):
                if resource_type == constants.ResourceType.PORT:
                    unit = consts.PORT_CAP[target]['unit']
                elif resource_type == constants.ResourceType.VOLUME:
                    unit = consts.VOLUME_CAP[target]['unit']
                elif resource_type == constants.ResourceType.DISK:
                    unit = consts.DISK_CAP[target]['unit']
                elif resource_type == constants.ResourceType.CONTROLLER:
                    unit = consts.CONTROLLER_CAP[target]['unit']
                if 'responseTime' == target:
                    for res_time in metric_map.get(resource_info).get(target):
                        for iops_time in metric_map.get(resource_info).get(
                                'iops'):
                            if res_time == iops_time:
                                res_value = metric_map.get(resource_info).get(
                                    target).get(res_time)
                                iops_value = metric_map.get(
                                    resource_info).get('iops').get(iops_time)
                                res_value = \
                                    res_value / iops_value if iops_value \
                                    else 0
                                res_value = round(res_value, 3)
                                metric_map[resource_info][target][res_time] \
                                    = res_value
                                break
                labels = {
                    'storage_id': storage_id,
                    'resource_type': resource_type,
                    'resource_id': resource_id,
                    'resource_name': resource_name,
                    'type': 'RAW',
                    'unit': unit
                }
                metric_value = constants.metric_struct(
                    name=target,
                    labels=labels,
                    values=metric_map.get(resource_info).get(target))
                metrics.append(metric_value)

    @staticmethod
    def count_metric_data(last_data, now_data, interval, target, metric_type,
                          metric_map, res_id):
        if not target:
            return
        if 'CACHEHITRATIO' not in metric_type.upper():
            value = SSHHandler.count_difference(now_data.get(target),
                                                last_data.get(target))
        else:
            value = now_data.get(
                SSHHandler.VOLUME_PERF_METRICS.get(metric_type))
        if 'THROUGHPUT' in metric_type.upper():
            value = value / interval / units.Mi
        elif 'IOSIZE' in metric_type.upper():
            value = value / units.Ki
        elif 'IOPS' in metric_type.upper():
            value = int(value / interval)
        elif 'RESPONSETIME' in metric_type.upper():
            value = value / interval
        value = round(value, 3)
        if metric_map.get(res_id):
            if metric_map.get(res_id).get(metric_type):
                if metric_map.get(res_id).get(metric_type).get(
                        now_data.get('time')):
                    metric_map[res_id][metric_type][now_data.get('time')] \
                        += value
                else:
                    metric_map[res_id][metric_type][now_data.get('time')] \
                        = value
            else:
                metric_map[res_id][metric_type] = \
                    {now_data.get('time'): value}
        else:
            metric_map[res_id] = {metric_type: {now_data.get('time'): value}}

    @staticmethod
    def count_difference(now_value, last_value):
        return now_value if now_value < last_value else now_value - last_value

    @staticmethod
    def handle_volume_cache_hit(now_data, last_data):
        rh = SSHHandler.count_difference(now_data.get('rh'),
                                         last_data.get('rh'))
        wh = SSHHandler.count_difference(now_data.get('wh'),
                                         last_data.get('wh'))
        rht = SSHHandler.count_difference(now_data.get('rht'),
                                          last_data.get('rht'))
        wht = SSHHandler.count_difference(now_data.get('wht'),
                                          last_data.get('wht'))
        rhr = rh * 100 / rht if rht > 0 else 0
        whr = wh * 100 / wht if wht > 0 else 0
        hrt = rhr + whr
        now_data['rhr'] = rhr
        now_data['whr'] = whr
        now_data['hrt'] = hrt
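    # Illustrative note (not from the original source): the perf counters in
    # the iostats dumps are cumulative, so count_difference() above returns
    # the delta between samples, falling back to the raw value when the
    # counter has wrapped or reset (now < last). For a hypothetical pair of
    # samples 60 seconds apart with now['rb'] = 6291456 and last['rb'] = 0,
    # count_metric_data() computes 6291456 / 60 / units.Mi = 0.1 for a
    # 'THROUGHPUT' metric, keyed by now_data['time'] in metric_map.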
    def get_data_from_each_file(self, file, metric_map, target_list,
                                resource_type, last_data):
        with self.ssh_pool.item() as ssh:
            local_path = '%s/%s' % (
                os.path.abspath(os.path.join(os.getcwd())),
                consts.LOCAL_FILE_PATH)
            file_xml = Tools.get_remote_file_to_xml(
                ssh, file[1], local_path, consts.REMOTE_FILE_PATH)
            if not file_xml:
                return
            for data in file_xml:
                if re.sub(u"\\{.*?}", "", data.tag) == \
                        SSHHandler.TARGET_RESOURCE_RELATION.get(
                            resource_type):
                    if resource_type == constants.ResourceType.PORT:
                        if data.attrib.get('fc_wwpn'):
                            resource_info = data.attrib.get('fc_wwpn')
                        else:
                            continue
                    elif resource_type == constants. \
                            ResourceType.CONTROLLER:
                        resource_info = '%s_%s' % (
                            int(data.attrib.get('node_id'), 16),
                            data.attrib.get('id'))
                    else:
                        resource_info = '%s_%s' % (data.attrib.get('idx'),
                                                   data.attrib.get('id'))
                    now_data = SSHHandler.package_xml_data(data.attrib,
                                                           file[0],
                                                           resource_type)
                    if last_data.get(resource_info):
                        interval = (int(file[0]) - last_data.get(
                            resource_info).get('time')) / units.k
                        if interval <= 0:
                            break
                        if resource_type == constants.ResourceType.VOLUME:
                            SSHHandler.handle_volume_cache_hit(
                                now_data, last_data.get(resource_info))
                        for target in target_list:
                            device_target = SSHHandler. \
                                RESOURCE_PERF_MAP.get(resource_type)
                            SSHHandler.count_metric_data(
                                last_data.get(resource_info), now_data,
                                interval, device_target.get(target),
                                target, metric_map, resource_info)
                        last_data[resource_info] = now_data
                    else:
                        last_data[resource_info] = now_data

    def get_stats_from_file(self, file_list, metric_map, target_list,
                            resource_type, start_time, end_time):
        if not file_list:
            return
        find_first_file = False
        recent_file = None
        last_data = {}
        for file in file_list:
            if file[0] >= start_time and file[0] <= end_time:
                if find_first_file is False:
                    if recent_file:
                        self.get_data_from_each_file(recent_file, metric_map,
                                                     target_list,
                                                     resource_type,
                                                     last_data)
                    self.get_data_from_each_file(file, metric_map,
                                                 target_list, resource_type,
                                                 last_data)
                    find_first_file = True
                else:
                    self.get_data_from_each_file(file, metric_map,
                                                 target_list, resource_type,
                                                 last_data)
            recent_file = file

    @staticmethod
    def package_xml_data(file_data, file_time, resource_type):
        rb = 0
        wb = 0
        res_time = 0
        rh = 0
        wh = 0
        rht = 0
        wht = 0
        if resource_type == constants.ResourceType.PORT:
            rb = (int(file_data.get('cbr')) + int(file_data.get('hbr')) + int(
                file_data.get('lnbr')) + int(
                file_data.get('rmbr'))) * SSHHandler.BYTES_TO_BIT
            wb = (int(file_data.get('cbt')) + int(file_data.get('hbt')) + int(
                file_data.get('lnbt')) + int(
                file_data.get('rmbt'))) * SSHHandler.BYTES_TO_BIT
            ro = int(file_data.get('cer')) + int(file_data.get('her')) + int(
                file_data.get('lner')) + int(file_data.get('rmer'))
            wo = int(file_data.get('cet')) + int(file_data.get('het')) + int(
                file_data.get('lnet')) + int(file_data.get('rmet'))
            res_time = int(file_data.get('dtdt', 0)) / units.Ki
        else:
            if resource_type == constants.ResourceType.VOLUME:
                rb = int(file_data.get('rb')) * SSHHandler.BLOCK_SIZE
                wb = int(file_data.get('wb')) * SSHHandler.BLOCK_SIZE
                rh = int(file_data.get('ctrhs'))
                wh = int(file_data.get('ctwhs'))
                rht = int(file_data.get('ctrs'))
                wht = int(file_data.get('ctws'))
                res_time = int(file_data.get('xl'))
            elif resource_type == constants.ResourceType.DISK:
                rb = int(file_data.get('rb')) * SSHHandler.BLOCK_SIZE
                wb = int(file_data.get('wb')) * SSHHandler.BLOCK_SIZE
                res_time = int(file_data.get('rq')) + int(file_data.get('wq'))
            elif resource_type == constants.ResourceType.CONTROLLER:
                rb = int(file_data.get('rb')) * SSHHandler.BYTES_TO_BIT
                wb = int(file_data.get('wb')) * SSHHandler.BYTES_TO_BIT
                res_time = int(file_data.get('rq')) + int(file_data.get('wq'))
            ro = int(file_data.get('ro'))
            wo = int(file_data.get('wo'))
        now_data = {
            'rb': rb,
            'wb': wb,
            'ro': ro,
            'wo': wo,
            'tb': rb + wb,
            'to': ro + wo,
            'rh': rh,
            'wh': wh,
            'rht': rht,
            'wht': wht,
            'res_time': res_time,
            'time': int(file_time)
        }
        return now_data

    def get_stats_file_data(self, file_map, res_type, metrics, storage_id,
                            target_list, start_time, end_time):
        metric_map = {}
        for file_type in file_map:
            file_list = file_map.get(file_type)
            if 'Nv' in file_type and \
                    res_type == constants.ResourceType.VOLUME:
                self.get_stats_from_file(file_list, metric_map, target_list,
                                         constants.ResourceType.VOLUME,
                                         start_time, end_time)
            elif 'Nm' in file_type and \
                    res_type == constants.ResourceType.DISK:
                self.get_stats_from_file(file_list, metric_map, target_list,
                                         constants.ResourceType.DISK,
                                         start_time, end_time)
            elif 'Nn' in file_type and \
                    res_type == constants.ResourceType.PORT:
                self.get_stats_from_file(file_list, metric_map, target_list,
                                         constants.ResourceType.PORT,
                                         start_time, end_time)
            elif 'Nn' in file_type and res_type == \
                    constants.ResourceType.CONTROLLER:
                self.get_stats_from_file(file_list, metric_map, target_list,
                                         constants.ResourceType.CONTROLLER,
                                         start_time, end_time)
        self.package_data(storage_id, res_type, metrics, metric_map)

    def collect_perf_metrics(self, storage_id, resource_metrics, start_time,
                             end_time):
        metrics = []
        file_map = {}
        try:
            self.get_stats_filelist(file_map)
            if resource_metrics.get(constants.ResourceType.VOLUME):
                self.get_stats_file_data(
                    file_map, constants.ResourceType.VOLUME, metrics,
                    storage_id,
                    resource_metrics.get(constants.ResourceType.VOLUME),
                    start_time, end_time)
            if resource_metrics.get(constants.ResourceType.DISK):
                self.get_stats_file_data(
                    file_map, constants.ResourceType.DISK, metrics,
                    storage_id,
                    resource_metrics.get(constants.ResourceType.DISK),
                    start_time, end_time)
            if resource_metrics.get(constants.ResourceType.PORT):
                self.get_stats_file_data(
                    file_map, constants.ResourceType.PORT, metrics,
                    storage_id,
                    resource_metrics.get(constants.ResourceType.PORT),
                    start_time, end_time)
            if resource_metrics.get(constants.ResourceType.CONTROLLER):
                self.get_stats_file_data(
                    file_map, constants.ResourceType.CONTROLLER, metrics,
                    storage_id,
                    resource_metrics.get(constants.ResourceType.CONTROLLER),
                    start_time, end_time)
        except Exception as err:
            err_msg = "Failed to collect metrics from svc: %s" % \
                      (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
        return metrics

    def get_latest_perf_timestamp(self):
        latest_time = 0
        stats_file_command = 'lsdumps -prefix /dumps/iostats'
        file_list = self.exec_ssh_command(stats_file_command)
        file_line = file_list.split('\n')
        for file in islice(file_line, 1, None):
            if file:
                file_arr = ' '.join(file.split()).split(' ')
                if len(file_arr) > 1:
                    file_name = file_arr[1]
                    name_arr = file_name.split('_')
                    file_time = '20%s%s' % (name_arr[3], name_arr[4])
                    time_pattern = '%Y%m%d%H%M%S'
                    tools = Tools()
                    occur_time = tools.time_str_to_timestamp(
                        file_time, time_pattern)
                    if latest_time < occur_time:
                        latest_time = occur_time
        return latest_time

    def list_storage_hosts(self, storage_id):
        try:
            host_list = []
            hosts = self.exec_ssh_command('lshost')
            host_res = hosts.split('\n')
            for i in range(1, len(host_res)):
                if host_res[i] is None or host_res[i] == '':
                    continue
                control_str = ' '.join(host_res[i].split())
                str_info = control_str.split(' ')
                host_id = str_info[0]
                detail_command = 'lshost %s' % host_id
                detail_info = self.exec_ssh_command(detail_command)
                host_map = {}
                self.handle_detail(detail_info, host_map, split=' ')
                status = SSHHandler.HOST_STATUS_MAP.get(
                    host_map.get('status'))
                host_result = {
                    "name": host_map.get('name'),
                    "storage_id": storage_id,
                    "native_storage_host_id": host_map.get('id'),
                    "os_type": SSHHandler.OS_TYPE_MAP.get(
                        host_map.get('type', '').lower()),
                    "status": status
                }
                host_list.append(host_result)
            return host_list
        except Exception as e:
            LOG.error("Failed to get host metrics from svc")
            raise e

    def list_masking_views(self, storage_id):
        try:
            view_list = []
            hosts = self.exec_ssh_command('lshostvdiskmap')
            host_res = hosts.split('\n')
            for i in range(1, len(host_res)):
                if host_res[i] is None or host_res[i] == '':
                    continue
                control_str = ' '.join(host_res[i].split())
                str_info = control_str.split(' ')
                if len(str_info) > 3:
                    host_id = str_info[0]
                    vdisk_id = str_info[3]
                    view_id = '%s_%s' % (str_info[0], str_info[3])
                    view_result = {
                        "name": view_id,
                        "native_storage_host_id": host_id,
                        "storage_id": storage_id,
                        "native_volume_id": vdisk_id,
                        "native_masking_view_id": view_id,
                    }
                    view_list.append(view_result)
            return view_list
        except Exception as e:
            LOG.error("Failed to get view metrics from svc")
            raise e
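    # Illustrative note (not from the original source): the initiator parser
    # below walks the `lshost <id>` detail output line by line. With a
    # hypothetical detail block such as
    #   id 0
    #   WWPN 10000090FA0B2B5C
    #   state active
    # it yields one initiator dict per WWPN/iscsi_name that is followed by a
    # 'state' line, then resets init_name/type so a multi-port host emits one
    # entry per port.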
    def list_storage_host_initiators(self, storage_id):
        try:
            initiator_list = []
            hosts = self.exec_ssh_command('lshost')
            host_res = hosts.split('\n')
            for i in range(1, len(host_res)):
                if host_res[i] is None or host_res[i] == '':
                    continue
                control_str = ' '.join(host_res[i].split())
                str_info = control_str.split(' ')
                host_id = str_info[0]
                detail_command = 'lshost %s' % host_id
                detail_info = self.exec_ssh_command(detail_command)
                init_name = None
                type = None
                host_id = None
                for host in detail_info.split('\n'):
                    if host:
                        strinfo = host.split(' ', 1)
                        key = strinfo[0]
                        value = None
                        if len(strinfo) > 1:
                            value = strinfo[1]
                        if key == 'WWPN':
                            init_name = value
                            type = 'fc'
                        elif key == 'iscsi_name':
                            init_name = value
                            type = 'iscsi'
                        elif key == 'id':
                            host_id = value
                        elif key == 'state' and init_name:
                            status = SSHHandler.INITIATOR_STATUS_MAP.get(
                                value)
                            init_result = {
                                "name": init_name,
                                "storage_id": storage_id,
                                "native_storage_host_initiator_id":
                                    init_name,
                                "wwn": init_name,
                                "status": status,
                                "type": type,
                                "native_storage_host_id": host_id
                            }
                            initiator_list.append(init_result)
                            init_name = None
                            type = None
            return initiator_list
        except Exception as e:
            LOG.error("Failed to get initiators metrics from svc")
            raise e
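
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the repository): the collector above
# accumulates samples into a nested dict of the shape
# {resource_id: {metric_name: {timestamp: value}}} before package_data()
# flattens it into metric structs. A minimal standalone rehearsal of that
# shape, using a plain dict in place of delfin's constants.metric_struct:
def record_sample(metric_map, res_id, metric, ts, value):
    # Mirrors count_metric_data(): create the nesting on first sight,
    # accumulate when the same timestamp is hit twice.
    metric_map.setdefault(res_id, {}).setdefault(metric, {})
    metric_map[res_id][metric][ts] = \
        metric_map[res_id][metric].get(ts, 0) + value


if __name__ == '__main__':
    samples = {}
    record_sample(samples, '0_volume1', 'throughput', 1700000000000, 0.1)
    record_sample(samples, '0_volume1', 'throughput', 1700000060000, 0.3)
    assert samples['0_volume1']['throughput'][1700000060000] == 0.3
    print(samples)
# ---------------------------------------------------------------------------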
================================================
FILE: delfin/drivers/ibm/storwize_svc/storwize_svc.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from delfin.common import constants
from delfin.drivers import driver
from delfin.drivers.ibm.storwize_svc import ssh_handler, consts
from delfin.drivers.ibm.storwize_svc.ssh_handler import SSHHandler


class StorwizeSVCDriver(driver.StorageDriver):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.ssh_handler = ssh_handler.SSHHandler(**kwargs)
        self.ssh_handler.login()

    def reset_connection(self, context, **kwargs):
        self.ssh_handler.login()

    def get_storage(self, context):
        return self.ssh_handler.get_storage()

    def list_storage_pools(self, context):
        return self.ssh_handler.list_storage_pools(self.storage_id)

    def list_volumes(self, context):
        return self.ssh_handler.list_volumes(self.storage_id)

    def list_controllers(self, context):
        return self.ssh_handler.list_controllers(self.storage_id)

    def list_ports(self, context):
        return self.ssh_handler.list_ports(self.storage_id)

    def list_disks(self, context):
        return self.ssh_handler.list_disks(self.storage_id)

    def list_alerts(self, context, query_para=None):
        return self.ssh_handler.list_alerts(query_para)

    def add_trap_config(self, context, trap_config):
        pass

    def remove_trap_config(self, context, trap_config):
        pass

    @staticmethod
    def parse_alert(context, alert):
        return SSHHandler.parse_alert(alert)

    def clear_alert(self, context, alert):
        return self.ssh_handler.fix_alert(alert)

    @staticmethod
    def get_access_url():
        return 'https://{ip}'

    def collect_perf_metrics(self, context, storage_id, resource_metrics,
                             start_time, end_time):
        return self.ssh_handler.collect_perf_metrics(
            storage_id, resource_metrics, start_time, end_time)

    @staticmethod
    def get_capabilities(context, filters=None):
        """Get capability of supported driver"""
        return {
            'is_historic': True,
            'resource_metrics': {
                constants.ResourceType.VOLUME: consts.VOLUME_CAP,
                constants.ResourceType.PORT: consts.PORT_CAP,
                constants.ResourceType.DISK: consts.DISK_CAP,
                constants.ResourceType.CONTROLLER: consts.CONTROLLER_CAP
            }
        }

    def get_latest_perf_timestamp(self, context):
        return self.ssh_handler.get_latest_perf_timestamp()

    def list_storage_hosts(self, context):
        return self.ssh_handler.list_storage_hosts(self.storage_id)

    def list_masking_views(self, context):
        return self.ssh_handler.list_masking_views(self.storage_id)

    def list_storage_host_initiators(self, context):
        return self.ssh_handler.list_storage_host_initiators(self.storage_id)
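
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the repository): a caller can intersect a
# user's requested metrics with the driver's advertised capabilities before
# invoking collect_perf_metrics(). The request shape below is hypothetical.
def filter_supported(requested, capabilities):
    supported = capabilities.get('resource_metrics', {})
    return {
        res_type: [m for m in metrics if m in supported.get(res_type, {})]
        for res_type, metrics in requested.items()
    }


if __name__ == '__main__':
    caps = {'resource_metrics': {'volume': {'iops': {}, 'throughput': {}}}}
    wanted = {'volume': ['iops', 'latency'], 'port': ['iops']}
    # -> {'volume': ['iops'], 'port': []}
    print(filter_supported(wanted, caps))
# ---------------------------------------------------------------------------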

================================================
FILE: delfin/drivers/inspur/__init__.py
================================================


================================================
FILE: delfin/drivers/inspur/as5500/__init__.py
================================================


================================================
FILE: delfin/drivers/inspur/as5500/as5500.py
================================================
# Copyright 2022 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from delfin.drivers.ibm.storwize_svc.storwize_svc import StorwizeSVCDriver


class As5500Driver(StorwizeSVCDriver):
    def get_storage(self, context):
        storage = super().get_storage(context)
        storage['vendor'] = 'Inspur'
        return storage


================================================
FILE: delfin/drivers/macro_san/__init__.py
================================================


================================================
FILE: delfin/drivers/macro_san/ms/__init__.py
================================================


================================================
FILE: delfin/drivers/macro_san/ms/consts.py
================================================
# Copyright 2022 The SODA Authors.
# Copyright (c) 2022 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from delfin.common import constants

# Command
ODSP_SH = '/odsp/scripts/odsp_sh.sh'
SYSTEM_QUERY = 'system mgt query'
SYSTEM_VERSION = 'system mgt getversion'
SYSTEM_CPU = 'system mgt getcpuinfo'
POOL_LIST = 'pool mgt getlist'
RAID_LIST = 'raid mgt getlist -p {}'
LUN_LIST = 'lun mgt getlist -p {}'
LUN_QUERY = 'lun mgt query -n {}'
DSU_LIST = 'dsu mgt getlist'
DISK_LIST = 'disk mgt getlist -d {}'
DISK_QUERY = 'disk mgt query -d {}'
HA_STATUS = 'ha mgt getstatus'
CLIENT_INITIATOR_GETLIST = 'client initiator getlist -t all'
CLIENT_LIST = 'client mgt getclientlist'
CLIENT_HOST = 'client host gethostlist'
HOST_GROUP = 'client hostgroup gethglist'
HOST_GROUP_N = 'client hostgroup gethostlist -n {}'
VOLUME_GROUP = 'client lungroup getlglist'
VOLUME_GROUP_N = 'client lungroup getlunlist -n {}'
SHARE_LUN_LIST = 'client mgt getsharelunlist -n {}'
MAPVIEW = 'client mapview getlist'
TARGET_QUERY_PORT_LIST = 'client target queryportlist'
SAS_PORT_LIST = 'system sas getportlist -c {}:{}'

# character
SUCCESSFUL_TAG = 'Command completed successfully.'
FAILED_TAG = 'Command failed.'
UNKNOWN_COMMAND_TAG = 'Unknown command.'
PORT_SUCCESSFUL_TAG = 'Commandcompletedsuccessfully.'
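# Note: PORT_SUCCESSFUL_TAG is SUCCESSFUL_TAG with the whitespace removed;
# it is presumably matched against port-list output after spaces have been
# stripped by the parser.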
COLON = ':' LEFT_HALF_BRACKET = '[' AFTER_HALF_BRACKET = 'Version]' CPU_INFORMATION_BRACKET = 'CPU Information]' SP = 'SP' ODSP_MSC_VERSION_KEY = 'ODSP_MSCVersion' ODSP_DRIVER_VERSION_KEY = 'ODSP_DriverVersion' PROCESSOR_VENDOR_KEY = 'Processor0Vendor_id' PROCESSOR_FREQUENCY_KEY = 'Processor0CPUFrequency' STORAGE_VENDOR = 'MacroSAN' FIELDS_NAME = 'Name:' FIELDS_ENABLE = 'enable' FIELDS_INITIATOR_ALIAS = 'InitiatorAlias:' FIELDS_INITIATOR_HOST = 'N/A' FIELDS_HOST_NAME = 'Host Name:' FIELDS_HOST_NAME_TWO = 'HostName:' FIELDS_HOST_GROUP_NAME = 'Host Group Name:' FIELDS_VOLUME_GROUP_NAME = 'LUN Group Name:' FIELDS_LUN_NAME = 'LUNName:' FIELDS_MAPVIEW_NAME = 'Mapview Name:' FIELDS_LINK_STATUS = 'Link Status' DSU = 'DSU-' DISK = 'Disk-' HA_RUNNING_STATUS = 'HARunningStatus' PORT = 'port' GBPS = 'Gbps' MBPS = 'Mbps' KBPS = 'KBPS' TIME_PATTERN = '%Y-%m-%d %H:%M:%S' # regular expression SYSTEM_CPU_SP_REGULAR = '^\\[SP\\d.* CPU.*]' SYSTEM_VERSION_SP_REGULAR = '\\[SP\\d.* Version\\]' TARGET_PORT_REGULAR = 'port\\-\\d\\:\\d\\:\\d$' # The time limit TIME_LIMIT = 8 # model MODEL_PATH = '{}/delfin/drivers/macro_san/ms/file/{}{}' STORAGE_INFO_REGULAR = '^storage_info.*\\.xls$' STORAGE_INFO_MODEL_REGULAR = '^MS' FTP_PATH_TMP = '/tmp' FTP_PATH_FILE = '/tmp/{}' # alert MACRO_SAN_TIME_FORMAT = '%Y-%m-%d %H:%M:%S' OS_PATH = '{}/delfin/drivers/macro_san/ms/file/alert{}' ALERT_FILE_NAME = 'alarm_history_query.csv.sp' FTP_ALERT_PATH = '/odsp/log/remote' YES_FIELDS = '是' SEVERITY_MAP = { 'fatal': constants.Severity.FATAL, '紧急': constants.Severity.FATAL, 'critical': constants.Severity.CRITICAL, '重要': constants.Severity.MAJOR, 'major': constants.Severity.MAJOR, 'minor': constants.Severity.MINOR, 'warning': constants.Severity.WARNING, '警告': constants.Severity.WARNING, 'informational': constants.Severity.INFORMATIONAL, 'NotSpecified': constants.Severity.NOT_SPECIFIED } class digital_constant(object): ZERO_INT = 0 ONE_INT = 1 MINUS_ONE_INT = -1 TWO_INT = 2 THREE_INT = 3 FOUR_INT = 4 FIVE_INT = 5 SIX_INT = 6 SEVEN_INT = 7 TWELVE_INT = 12 SIXTEEN_INT = 13 THIRTY_SIX = 36 SIXTY = 60 STORAGE_STATUS_MAP = { 'normal': constants.StorageStatus.NORMAL, 'offline': constants.StorageStatus.OFFLINE, 'abnormal': constants.StorageStatus.ABNORMAL, 'takeover': constants.StorageStatus.NORMAL, 'degraded': constants.StorageStatus.DEGRADED, 'unknown': constants.StorageStatus.UNKNOWN, } LIST_VOLUMES_STATUS_MAP = { 'normal': constants.StorageStatus.NORMAL, 'offline': constants.StorageStatus.OFFLINE, 'abnormal': constants.StorageStatus.ABNORMAL, 'error': constants.StorageStatus.ABNORMAL, 'fault': constants.StorageStatus.ABNORMAL, 'faulty': constants.StorageStatus.ABNORMAL, 'degraded': constants.StorageStatus.DEGRADED, 'unknown': constants.StorageStatus.UNKNOWN } VOLUME_TYPE_MAP = { 'disable': constants.VolumeType.THICK, 'enable': constants.VolumeType.THIN } class POOL_STATUS_ABNORMAL(object): FAULTY = 'faulty' FAULT = 'fault' ERROR = 'error' ABNORMAL = 'abnormal' ALL = (FAULTY, FAULT, ERROR, ABNORMAL) class POOL_STATUS_NORMAL(object): OFFLINE = 'offline' NORMAL = 'normal' ALL = (OFFLINE, NORMAL) POOLS_STATUS_MAP = { 'normal': constants.StoragePoolStatus.NORMAL, 'offline': constants.StoragePoolStatus.OFFLINE, 'abnormal': constants.StoragePoolStatus.ABNORMAL, 'error': constants.StoragePoolStatus.ABNORMAL, 'fault': constants.StoragePoolStatus.ABNORMAL, 'faulty': constants.StoragePoolStatus.ABNORMAL, 'unknown': constants.StoragePoolStatus.UNKNOWN, 'degraded': constants.StoragePoolStatus.DEGRADED } DISK_PHYSICAL_TYPE_MAP = { 'ssd': 
constants.DiskPhysicalType.SSD, 'sata': constants.DiskPhysicalType.SATA, 'sas': constants.DiskPhysicalType.SAS, 'nl-ssd': constants.DiskPhysicalType.NL_SSD, 'fc': constants.DiskPhysicalType.FC, 'lun': constants.DiskPhysicalType.LUN, 'ata': constants.DiskPhysicalType.ATA, 'flash': constants.DiskPhysicalType.FLASH, 'vmdisk': constants.DiskPhysicalType.VMDISK, 'nl-sas': constants.DiskPhysicalType.NL_SAS, 'ssd-card': constants.DiskPhysicalType.SSD_CARD, 'sas-flash-vp': constants.DiskPhysicalType.SAS_FLASH_VP, 'hdd': constants.DiskPhysicalType.HDD, 'unknown': constants.DiskPhysicalType.UNKNOWN } DISK_LOGICAL_TYPE_MAP = { 'free': constants.DiskLogicalType.FREE, 'member': constants.DiskLogicalType.MEMBER, 'hotspare': constants.DiskLogicalType.HOTSPARE, 'cache': constants.DiskLogicalType.CACHE, 'aggregate': constants.DiskLogicalType.AGGREGATE, 'broken': constants.DiskLogicalType.BROKEN, 'foreign': constants.DiskLogicalType.FOREIGN, 'labelmaint': constants.DiskLogicalType.LABELMAINT, 'maintenance': constants.DiskLogicalType.MAINTENANCE, 'shared': constants.DiskLogicalType.SHARED, 'spare': constants.DiskLogicalType.SPARE, 'unassigned': constants.DiskLogicalType.UNASSIGNED, 'unsupported': constants.DiskLogicalType.UNSUPPORTED, 'remote': constants.DiskLogicalType.REMOTE, 'mediator': constants.DiskLogicalType.MEDIATOR, 'data': constants.DiskLogicalType.DATA, 'datadisk': constants.DiskLogicalType.DATA, 'unknown': constants.DiskLogicalType.UNKNOWN } DISK_STATUS_MAP = { 'normal': constants.DiskStatus.NORMAL, 'abnormal': constants.DiskStatus.ABNORMAL, 'fault': constants.DiskStatus.ABNORMAL, 'faulty': constants.DiskStatus.ABNORMAL, 'degraded': constants.DiskStatus.DEGRADED, 'offline': constants.DiskStatus.OFFLINE } CONTROLLERS_STATUS_MAP = { 'normal': constants.ControllerStatus.NORMAL, 'dual--single': constants.ControllerStatus.NORMAL, 'single-single': constants.ControllerStatus.NORMAL, 'single': constants.ControllerStatus.NORMAL, 'offline': constants.ControllerStatus.OFFLINE, 'absent--poweroff': constants.ControllerStatus.OFFLINE, 'poweroff': constants.ControllerStatus.OFFLINE, 'fault': constants.ControllerStatus.FAULT, 'error': constants.ControllerStatus.FAULT, 'abnormal': constants.ControllerStatus.FAULT, 'degraded': constants.ControllerStatus.DEGRADED, 'double-idle': constants.ControllerStatus.NORMAL, 'double': constants.ControllerStatus.NORMAL, 'triple': constants.ControllerStatus.NORMAL, 'quadruple': constants.ControllerStatus.NORMAL, 'unknown': constants.ControllerStatus.UNKNOWN } PORT_CONNECTION_STATUS_MAP = { '1': constants.PortConnectionStatus.CONNECTED, '2': constants.PortConnectionStatus.DISCONNECTED, 'Full-Linkup': constants.PortConnectionStatus.CONNECTED, 'Linkdown': constants.PortConnectionStatus.DISCONNECTED } INITIATOR_TYPE_MAP = { 'fc': constants.InitiatorType.FC, 'iscsi': constants.InitiatorType.ISCSI, 'roce': constants.InitiatorType.NVME_OVER_ROCE, 'sas': constants.InitiatorType.SAS, 'nvme-of': constants.InitiatorType.NVME_OVER_FABRIC, 'unknown': constants.InitiatorType.UNKNOWN } INITIATOR_STATUS_MAP = { 'offline': constants.InitiatorStatus.OFFLINE, 'online': constants.InitiatorStatus.ONLINE, 'normal': constants.InitiatorStatus.ONLINE, 'n/a': constants.InitiatorStatus.UNKNOWN } HOST_OS_TYPES_MAP = { 'linux': constants.HostOSTypes.LINUX, 'windows': constants.HostOSTypes.WINDOWS, 'windows2008': constants.HostOSTypes.WINDOWS, 'solaris': constants.HostOSTypes.SOLARIS, 'hp-ux': constants.HostOSTypes.HP_UX, 'hp_unix': constants.HostOSTypes.HP_UX, 'aix': constants.HostOSTypes.AIX, 'xenserver': 
constants.HostOSTypes.XEN_SERVER, 'vmware esx': constants.HostOSTypes.VMWARE_ESX, 'esxi': constants.HostOSTypes.VMWARE_ESX, 'linux_vis': constants.HostOSTypes.LINUX_VIS, 'windows server 2012': constants.HostOSTypes.WINDOWS_SERVER_2012, 'windows2012': constants.HostOSTypes.WINDOWS_SERVER_2012, 'oracle vm': constants.HostOSTypes.ORACLE_VM, 'open vms': constants.HostOSTypes.OPEN_VMS, 'mac os': constants.HostOSTypes.MAC_OS, 'svc': constants.HostOSTypes.UNKNOWN, 'other': constants.HostOSTypes.UNKNOWN, 'suse': constants.HostOSTypes.UNKNOWN, 'unknown': constants.HostOSTypes.UNKNOWN } PARSE_ALERT_ALERT_ID = '1.3.6.1.2.1.1.3.0' PARSE_ALERT_TIME = '1.3.6.1.2.1.25.1.2' PARSE_ALERT_STORAGE = '1.3.6.1.4.1.35904.1.2.1.1' PARSE_ALERT_NAME = '1.3.6.1.4.1.35904.1.2.1.4.1' PARSE_ALERT_LOCATION = '1.3.6.1.4.1.35904.1.2.1.4.2' PARSE_ALERT_DESCRIPTION = '1.3.6.1.4.1.35904.1.2.1.4.3' PARSE_ALERT_SEVERITY = '1.3.6.1.4.1.35904.1.2.1.4.4' ALERT_NAME_CONFIG = { 'power_supply_failed': '设备供电异常', 'power_supply_failed_reissue': '设备供电异常重发', 'power_supply_normal': '设备供电恢复正常', 'power_supply_abnormal': '设备供电异常', 'power_supply_abnormal_reissue': '设备供电异常重发', 'power_supply_absent': '电源模块不在位', 'power_supply_absent_reissue': '电源模块不在位重发', 'fan_normal': '风扇模块恢复正常', 'fan_failed': '风扇模块故障', 'fan_failed_reissue': '风扇模块故障重发', 'fan_absent': '风扇模块不在位', 'fan_absent_reissue': '风扇模块不在位重发', 'battery_normal': '电池模块恢复正常', 'battery_failed': '电池模块故障', 'battery_failed_reissue': '电池模块故障重发', 'battery_absent': '电池模块不在位', 'battery_absent_reissue': '电池模块不在位重发', 'battery_charging': '电池模块正在充电', 'battery_will_expire': '电池模块即将超期', 'battery_expired': '电池模块超期', 'battery_expired_reissue': '电池模块超期重发', 'battery_model_inconsistent': '电池模块型号不一致', 'temperature_normal': '温度恢复正常', 'temperature_warning': '温度一般告警', 'temperature_warning_reissue': '温度一般告警重发', 'temperature_critical': '温度严重告警', 'temperature_critical_reissue': '温度严重告警重发', 'Voltage_normal': '电压恢复正常', 'Voltage_warning': '电压一般告警', 'Voltage_warning_reissue': '电压一般告警重发', 'Voltage_critical': '电压严重告警', 'Voltage_critical_reissue': '电压严重告警重发', 'sp_power_on': 'SP开机', 'sp_power_off': 'SP关机', 'sp_absent': 'SP不在位', 'sp_memory_shrink': 'SP内存变小', 'sp_reboot_for_memory_insufficient': 'SP内存不足自动重启', 'sp_hardware_abnormally': 'SP硬件异常', 'sp_boot_disk_warning': 'SP系统盘告警', 'ha_auto_recover_disabled': 'HA自动恢复选项被禁用', 'ha_heartbeat_lost': 'HA心跳丢失', 'ha_self_detect_failure': 'HA自检发现故障', 'ha_takeover': 'SP被接管', 'ha_takeover_abnormally': 'SP接管异常', 'ha_recover_successfully': 'SP恢复成功', 'ha_recover_abnormally': 'SP恢复异常', 'ha_peer_sp_abnormally': '对端SP异常', 'cpu_utilization_normal': 'CPU利用率恢复正常', 'cpu_utilization_warning': 'CPU利用率一般告警', 'cpu_utilization_serious': 'CPU利用率重要告警', 'cpu_utilization_critical': 'CPU利用率严重告警', 'memory_utilization_normal': '内存利用率恢复正常', 'memory_utilization_warning': '内存利用率告警', 'sp_average_responsetime_normal': 'SP平均延时恢复正常', 'sp_average_responsetime_warning': 'SP平均延时告警', 'host_average_responsetime_normal': '主机平均延时恢复正常', 'host_average_responsetime_warning': '主机平均延时告警', 'iscsi_port_average_responsetime_normal': 'iSCSI端口平均延时恢复正常', 'iscsi_port_average_responsetime_warning': 'iSCSI端口平均延时告警', 'fc_port_average_responsetime_normal': 'FC端口平均延时恢复正常', 'fc_port_average_responsetime_warning': 'FC端口平均延时告警', 'nvmf_port_average_responsetime_normal': 'NVMf端口平均延时恢复正常', 'nvmf_port_average_responsetime_warning': 'NVMf端口平均延时告警', 'lun_average_responsetime_normal': 'LUN平均延时恢复正常', 'lun_average_responsetime_warning': 'LUN平均延时告警', 'device_busy': '设备管理通道忙', 'sys_lun_cache_capacity_insufficient': 'SYS-LUN-Cache空间不足', 
'sys_lun_log_capacity_insufficient': 'SYS-LUN-Log空间不足', 'global_write_cache_disabled_manually': '全局写缓存被手动禁用', 'global_write_cache_disabled_automatically': '全局写缓存被自动禁用', 'cache_vault_has_data': 'Cache Vault中有脏数据', 'software_version_inconsistent': '软件版本不一致', 'license_expired': 'License超期', 'system_failure_reboot': '系统异常重启', 'io_card_safe_remove': 'IO卡安全下电', 'io_card_pullout_forcibly': 'IO卡暴力拔出', 'io_card_abnormal': 'IO卡异常', 'port_linkup': '端口已连接', 'port_linkdown': '端口断开连接', 'port_link_recovery': '端口链路恢复', 'port_link_unstable': '端口链路不稳定', 'port_abnormal': '端口异常', 'port_closed': '端口被关闭', 'port_speed_nonoptimal': '端口非最大速率运行', 'port_optical_transceiver_mismatch': '端口光模块不匹配', 'sas_phy_disabled': 'SAS PHY被禁用', 'sas_phy_inconsistent': 'SAS_PHY速率不一致', 'sas_port_inconsistent': 'SAS端口连接状态不一致', 'i_t_connection_recovery': 'I_T连接恢复', 'i_t_connection_unstable': 'I_T连接不稳定', 'i_t_connected': 'I_T建立连接', 'i_t_unconnected': 'I_T未建立连接', 'i_t_l_insufficient': 'I_T_L低于预期', 'initiator_has_unestablished_connection': 'Initiator存在未建立的连接', 'nvmf_subsystem_full_connected': 'NVMf Subsystem完全连接', 'nvmf_subsystem_partial_connected': 'NVMf Subsystem部分连接', 'nvmf_subsystem_unconnected': 'NVMf Subsystem未连接', 'ep_online': 'EP上线', 'ep_offline': 'EP离线', 'ep_install_unproperly': 'EP未安装到位', 'ep_disordered_link': 'EP拓扑乱序', 'dsu_inconsistent_link': 'DSU拓扑不一致', 'disk_online': '磁盘上线', 'disk_offline': '磁盘异常离线', 'disk_safe_remove': '磁盘安全下电', 'disk_pullout_forcibly': '磁盘暴力拔出', 'disk_warning': '磁盘告警', 'disk_failed': '磁盘故障', 'disk_path_missing': '磁盘路径丢失', 'disk_poweron_time_warning': '磁盘上电时间告警', 'disk_poweron_time_warning_reissue': '磁盘上电时间告警重发', 'ssd_life_remaining_warning': 'SSD预计剩余寿命预警', 'ssd_life_remaining_critical': 'SSD预计剩余寿命严重告警', 'ssd_time_remaining_warning': 'SSD预计可用时间预警', 'ssd_time_remaining_critical': 'SSD预计可用时间严重告警', 'ssd_interface_unknown': 'SSD接口类型未知', 'raid_normal': 'RAID恢复正常', 'raid_degraded': 'RAID降级', 'raid_faulty': 'RAID错误', 'raid_failed': 'RAID故障', 'raid_rebuild_start': 'RAID开始重建', 'raid_rebuild_successfully': 'RAID完成重建', 'raid_cannot_rebuild': 'RAID重建等待热备盘', 'raid_rebuild_paused_abnormally': 'RAID重建失败', 'raid_spare_capacity_warning': 'RAID热备空间告警', 'raid_sync_successfully': 'RAID完成同步', 'raid_sync_failed': 'RAID同步失败', 'raid_disk_type_inconsistent': 'RAID成员磁盘类型不一致', 'lun_normal': 'LUN恢复正常', 'lun_faulty': 'LUN错误', 'lun_write_zero_failed': 'LUN自动置零功能失效', 'lun_write_cache_frozen': 'LUN写缓存被冻结', 'thinlun_expand_failed': 'Thin-LUN自动扩容失败', 'thinlun_physical_capacity_will_useup': 'Thin-LUN物理空间即将用光', 'thinlun_physical_capacity_has_usedup': 'Thin-LUN物理空间已经用光', 'thinlun_metadata_abnormal': 'Thin-LUN元数据异常', 'pool_capacity_normal': '存储池空间使用率恢复正常', 'pool_capacity_warning': '存储池空间使用率一般告警', 'pool_capacity_serious': '存储池空间使用率重要告警', 'pool_capacity_critical': '存储池空间使用率严重告警', 'pool_capacity_has_usedup': '存储池空间已经用光', 'pool_capacity_over_quota': '存储池已分配容量超出配额', 'pool_user_capacity_over_quota': '存储池用户容量超出配额', 'pool_data_protection_capacity_over_quota': '存储池数据保护容量超出配额', 'volume_online': '卷上线', 'volume_offline': '卷离线', 'volume_path_recovery': '卷路径恢复', 'volume_path_missing': '卷路径丢失', 'volume_attached': '卷联机', 'volume_detached': '卷脱机', 'volume_io_error': '卷IO错误', 'volume_average_responsetime_normal': '卷平均延时恢复正常', 'volume_average_responsetime_warning': '卷平均延时告警', 'snapshot_resource_full': '快照资源空间即将用光', 'snapshot_resource_invalid': '快照资源数据无效', 'snapshot_resource_expand_successfully': '快照资源自动扩容成功', 'snapshot_resource_expand_failed': '快照资源自动扩容失败', 'snapshot_point_delete_automatically': '自动删除快照时间点', 'snapshot_point_create_failed': '自动创建快照时间点失败', 
'snapshot_rollback_successfully': '快照回滚成功', 'snapshot_rollback_failed': '快照回滚失败', 'replication_start': '开始复制', 'replication_successfully': '复制成功', 'replication_failed': '复制失败', 'replication_scan_failed': '扫描失败', 'replication_replica_faulty': '副本资源复制状态异常', 'xan_link_unreachable': 'XAN链路不可达', 'xan_link_reachable': 'XAN链路恢复', 'sdas_link_unreachable': 'SDAS链路不可达', 'sdas_link_reachable': 'SDAS链路恢复', 'arbiter_unreachable': '节点不能访问仲裁者', 'arbiter_reachable': '节点可以访问仲裁者', 'mirror_auto_swap_successfully': '镜像对自动反转成功', 'mirror_auto_swap_failed': '镜像对自动反转失败', 'mirror_unsynchronized': '镜像对未同步', 'mirror_synchronized': '镜像对恢复已同步', 'mirror_negotiating': '镜像对是协商状态', 'clone_sync_start': '开始克隆同步', 'clone_sync_successfully': '克隆同步成功', 'clone_sync_failed': '克隆同步失败', 'migrate_start': '开始迁移', 'migrate_successfully': '迁移成功', 'migrate_failed': '迁移失败', 'migrate_negotiating': '迁移对是协商状态', 'migrate_auto_disable_failed': '迁移自动禁用失败', 'migrate_itl_remaining': '迁移残留ITL', 'dedup_data_exceed_spec': '重删数据量超过规格', 'dedup_discard_some_fingerprints': '重删丢弃部分指纹', 'sp_temperature_normal': 'SP温度恢复正常', 'sp_temperature_warning': 'SP温度一般告警', 'sp_temperature_warning_reissue': 'SP温度一般告警重发', 'sp_temperature_critical': 'SP温度严重告警', 'sp_temperature_critical_reissue': 'SP温度严重告警重发', 'sp_voltage_normal': 'SP电压恢复正常', 'sp_voltage_warning': 'SP电压一般告警', 'sp_voltage_warning_reissue': 'SP电压一般告警重发', 'sp_voltage_critical': 'SP电压严重告警', 'sp_voltage_critical_reissue': 'SP电压严重告警重发', 'ep_temperature_normal': 'EP温度恢复正常', 'ep_temperature_warning': 'EP温度一般告警', 'ep_temperature_warning_reissue': 'EP温度一般告警重发', 'ep_temperature_critical': 'EP温度严重告警', 'ep_temperature_critical_reissue': 'EP温度严重告警重发', 'spu_bat_normal': 'SPU电池模块恢复正常', 'spu_bat_failed': 'SPU电池模块变为故障', 'spu_bat_failed_reissue': 'SPU电池模块故障重发', 'spu_bat_absent': 'SPU电池模块不在位', 'spu_bat_absent_reissue': 'SPU电池模块不在位重发', 'spu_bat_will_expire': 'SPU电池模块即将超期', 'spu_bat_expired': 'SPU电池模块超期', 'spu_bat_expired_reissue': 'SPU电池模块超期重发', 'cmos_bat_normal': 'CMOS电池恢复正常', 'cmos_bat_failed': 'CMOS电池电力不足', 'cmos_bat_failed_reissue': 'CMOS电池电力不足重发', 'fc_link_error': 'FC链路错误', 'sp_unexpected_power_down': 'SP异常掉电', 'ha_takeover_successfully': 'HA接管成功', 'ha_takeover_failed': 'HA接管失败', 'write_cache_frozen': '写缓存被冻结', 'write_cache_disabled': '写缓存被自动禁用', 'sas_phy_speed_warning': 'SAS_PHY速率告警', 'disk_pullout_electrified': '磁盘带电拔出', 'sys_raid_warning': 'SYS_RAID告警', 'thinlun_physical_capacity_usedup': 'Thin-LUN物理空间已经用光', 'pool_capacity_will_useup': '存储池空间即将用光', 'sdas_link_recovery': 'SDAS链路恢复', 'sdas_auto_swap_successfully': 'SDAS自动反转成功', 'sdas_auto_swap_failed': 'SDAS自动反转失败', } PARSE_ALERT_SEVERITY_MAP = { '0': constants.Severity.NOT_SPECIFIED, '1': constants.Severity.FATAL, '2': constants.Severity.MAJOR, '3': constants.Severity.WARNING, '4': constants.Severity.INFORMATIONAL, } STORAGE_CAP = { constants.StorageMetric.IOPS.name: { "unit": constants.StorageMetric.IOPS.unit, "description": constants.StorageMetric.IOPS.description }, constants.StorageMetric.READ_IOPS.name: { "unit": constants.StorageMetric.READ_IOPS.unit, "description": constants.StorageMetric.READ_IOPS.description }, constants.StorageMetric.WRITE_IOPS.name: { "unit": constants.StorageMetric.WRITE_IOPS.unit, "description": constants.StorageMetric.WRITE_IOPS.description }, constants.StorageMetric.THROUGHPUT.name: { "unit": constants.StorageMetric.THROUGHPUT.unit, "description": constants.StorageMetric.THROUGHPUT.description }, constants.StorageMetric.READ_THROUGHPUT.name: { "unit": constants.StorageMetric.READ_THROUGHPUT.unit, "description": 
constants.StorageMetric.READ_THROUGHPUT.description }, constants.StorageMetric.WRITE_THROUGHPUT.name: { "unit": constants.StorageMetric.WRITE_THROUGHPUT.unit, "description": constants.StorageMetric.WRITE_THROUGHPUT.description }, constants.StorageMetric.RESPONSE_TIME.name: { "unit": constants.StorageMetric.RESPONSE_TIME.unit, "description": constants.StorageMetric.RESPONSE_TIME.description }, constants.StorageMetric.READ_RESPONSE_TIME.name: { "unit": constants.StorageMetric.READ_RESPONSE_TIME.unit, "description": constants.StorageMetric.READ_RESPONSE_TIME.description }, constants.StorageMetric.WRITE_RESPONSE_TIME.name: { "unit": constants.StorageMetric.WRITE_RESPONSE_TIME.unit, "description": constants.StorageMetric.WRITE_RESPONSE_TIME.description }, constants.StorageMetric.CACHE_HIT_RATIO.name: { "unit": constants.StorageMetric.CACHE_HIT_RATIO.unit, "description": constants.StorageMetric.CACHE_HIT_RATIO.description }, constants.StorageMetric.READ_CACHE_HIT_RATIO.name: { "unit": constants.StorageMetric.READ_CACHE_HIT_RATIO.unit, "description": constants.StorageMetric.READ_CACHE_HIT_RATIO.description }, constants.StorageMetric.WRITE_CACHE_HIT_RATIO.name: { "unit": constants.StorageMetric.WRITE_CACHE_HIT_RATIO.unit, "description": constants.StorageMetric.WRITE_CACHE_HIT_RATIO.description } } VOLUME_CAP = { constants.VolumeMetric.IOPS.name: { "unit": constants.VolumeMetric.IOPS.unit, "description": constants.VolumeMetric.IOPS.description }, constants.VolumeMetric.READ_IOPS.name: { "unit": constants.VolumeMetric.READ_IOPS.unit, "description": constants.VolumeMetric.READ_IOPS.description }, constants.VolumeMetric.WRITE_IOPS.name: { "unit": constants.VolumeMetric.WRITE_IOPS.unit, "description": constants.VolumeMetric.WRITE_IOPS.description }, constants.VolumeMetric.THROUGHPUT.name: { "unit": constants.VolumeMetric.THROUGHPUT.unit, "description": constants.VolumeMetric.THROUGHPUT.description }, constants.VolumeMetric.READ_THROUGHPUT.name: { "unit": constants.VolumeMetric.READ_THROUGHPUT.unit, "description": constants.VolumeMetric.READ_THROUGHPUT.description }, constants.VolumeMetric.WRITE_THROUGHPUT.name: { "unit": constants.VolumeMetric.WRITE_THROUGHPUT.unit, "description": constants.VolumeMetric.WRITE_THROUGHPUT.description }, constants.VolumeMetric.RESPONSE_TIME.name: { "unit": constants.VolumeMetric.RESPONSE_TIME.unit, "description": constants.VolumeMetric.RESPONSE_TIME.description }, constants.VolumeMetric.READ_RESPONSE_TIME.name: { "unit": constants.VolumeMetric.READ_RESPONSE_TIME.unit, "description": constants.VolumeMetric.READ_RESPONSE_TIME.description }, constants.VolumeMetric.WRITE_RESPONSE_TIME.name: { "unit": constants.VolumeMetric.WRITE_RESPONSE_TIME.unit, "description": constants.VolumeMetric.WRITE_RESPONSE_TIME.description }, constants.VolumeMetric.CACHE_HIT_RATIO.name: { "unit": constants.VolumeMetric.CACHE_HIT_RATIO.unit, "description": constants.VolumeMetric.CACHE_HIT_RATIO.description }, constants.VolumeMetric.READ_CACHE_HIT_RATIO.name: { "unit": constants.VolumeMetric.READ_CACHE_HIT_RATIO.unit, "description": constants.VolumeMetric.READ_CACHE_HIT_RATIO.description }, constants.VolumeMetric.WRITE_CACHE_HIT_RATIO.name: { "unit": constants.VolumeMetric.WRITE_CACHE_HIT_RATIO.unit, "description": constants.VolumeMetric.WRITE_CACHE_HIT_RATIO.description } } DISK_CAP = { constants.DiskMetric.IOPS.name: { "unit": constants.DiskMetric.IOPS.unit, "description": constants.DiskMetric.IOPS.description }, constants.DiskMetric.READ_IOPS.name: { "unit": constants.DiskMetric.READ_IOPS.unit, 
"description": constants.DiskMetric.READ_IOPS.description }, constants.DiskMetric.WRITE_IOPS.name: { "unit": constants.DiskMetric.WRITE_IOPS.unit, "description": constants.DiskMetric.WRITE_IOPS.description }, constants.DiskMetric.THROUGHPUT.name: { "unit": constants.DiskMetric.THROUGHPUT.unit, "description": constants.DiskMetric.THROUGHPUT.description }, constants.DiskMetric.READ_THROUGHPUT.name: { "unit": constants.DiskMetric.READ_THROUGHPUT.unit, "description": constants.DiskMetric.READ_THROUGHPUT.description }, constants.DiskMetric.WRITE_THROUGHPUT.name: { "unit": constants.DiskMetric.WRITE_THROUGHPUT.unit, "description": constants.DiskMetric.WRITE_THROUGHPUT.description }, constants.DiskMetric.RESPONSE_TIME.name: { "unit": constants.DiskMetric.RESPONSE_TIME.unit, "description": constants.DiskMetric.RESPONSE_TIME.description }, constants.DiskMetric.READ_RESPONSE_TIME.name: { "unit": constants.DiskMetric.READ_RESPONSE_TIME.unit, "description": constants.DiskMetric.READ_RESPONSE_TIME.description }, constants.DiskMetric.WRITE_RESPONSE_TIME.name: { "unit": constants.DiskMetric.WRITE_RESPONSE_TIME.unit, "description": constants.DiskMetric.WRITE_RESPONSE_TIME.description }, constants.DiskMetric.CACHE_HIT_RATIO.name: { "unit": constants.DiskMetric.CACHE_HIT_RATIO.unit, "description": constants.DiskMetric.CACHE_HIT_RATIO.description }, constants.DiskMetric.READ_CACHE_HIT_RATIO.name: { "unit": constants.DiskMetric.READ_CACHE_HIT_RATIO.unit, "description": constants.DiskMetric.READ_CACHE_HIT_RATIO.description }, constants.DiskMetric.WRITE_CACHE_HIT_RATIO.name: { "unit": constants.DiskMetric.WRITE_CACHE_HIT_RATIO.unit, "description": constants.DiskMetric.WRITE_CACHE_HIT_RATIO.description } } PORT_CAP = { constants.PortMetric.IOPS.name: { "unit": constants.PortMetric.IOPS.unit, "description": constants.PortMetric.IOPS.description }, constants.PortMetric.READ_IOPS.name: { "unit": constants.PortMetric.READ_IOPS.unit, "description": constants.PortMetric.READ_IOPS.description }, constants.PortMetric.WRITE_IOPS.name: { "unit": constants.PortMetric.WRITE_IOPS.unit, "description": constants.PortMetric.WRITE_IOPS.description }, constants.PortMetric.THROUGHPUT.name: { "unit": constants.PortMetric.THROUGHPUT.unit, "description": constants.PortMetric.THROUGHPUT.description }, constants.PortMetric.READ_THROUGHPUT.name: { "unit": constants.PortMetric.READ_THROUGHPUT.unit, "description": constants.PortMetric.READ_THROUGHPUT.description }, constants.PortMetric.WRITE_THROUGHPUT.name: { "unit": constants.PortMetric.WRITE_THROUGHPUT.unit, "description": constants.PortMetric.WRITE_THROUGHPUT.description }, constants.PortMetric.RESPONSE_TIME.name: { "unit": constants.PortMetric.RESPONSE_TIME.unit, "description": constants.PortMetric.RESPONSE_TIME.description }, constants.PortMetric.READ_RESPONSE_TIME.name: { "unit": constants.PortMetric.READ_RESPONSE_TIME.unit, "description": constants.PortMetric.READ_RESPONSE_TIME.description }, constants.PortMetric.WRITE_RESPONSE_TIME.name: { "unit": constants.PortMetric.WRITE_RESPONSE_TIME.unit, "description": constants.PortMetric.WRITE_RESPONSE_TIME.description }, constants.PortMetric.CACHE_HIT_RATIO.name: { "unit": constants.PortMetric.CACHE_HIT_RATIO.unit, "description": constants.PortMetric.CACHE_HIT_RATIO.description }, constants.PortMetric.READ_CACHE_HIT_RATIO.name: { "unit": constants.PortMetric.READ_CACHE_HIT_RATIO.unit, "description": constants.PortMetric.READ_CACHE_HIT_RATIO.description }, constants.PortMetric.WRITE_CACHE_HIT_RATIO.name: { "unit": 
constants.PortMetric.WRITE_CACHE_HIT_RATIO.unit,
        "description":
            constants.PortMetric.WRITE_CACHE_HIT_RATIO.description
    }
}

FTP_PERF_PATH = '/odsp/log/local/perf'
STRAGE_REGULAR = '^perf_device'
LUN_REGULAR = '^perf_lun'
SASPORT_REGULAR = '^perf_sasport'
ISCSIPORT_REGULAR = '^perf_iscsiport'
FCPORT_REGULAR = '^perf_fciport'
DISK_REGULAR = '^perf_disk'
SYSTEM_PERFORMANCE_FILE = 'system performance getfilelist'
VERSION_SHOW = 'versionshow'
CSV = '.csv'
SIXTY = 60
ADD_FOLDER = '{}/delfin/drivers/utils/performance_file/macro_san/{}{}{}'
PERF_LUN = 'perf_lun_'
PERF_SP = '_SP'
PERF_SAS_PORT = 'perf_sasport_'
PERF_ISCSI_PORT = 'perf_iscsiport_'
GET_DATE = 'date +%s'
SPECIAL_VERSION = 'Version:'
SAS_PORT = 'sasport'
ISCSI_PORT = 'iscsiport'
FC_PORT = 'fcport'


================================================
FILE: delfin/drivers/macro_san/ms/file/__init__.py
================================================


================================================
FILE: delfin/drivers/macro_san/ms/macro_ssh_client.py
================================================
# Copyright 2020 The SODA Authors.
# Copyright 2011 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

import paramiko
import six
from oslo_log import log as logging

from delfin import cryptor
from delfin import exception, utils
from delfin.drivers.utils.ssh_client import SSHPool

LOG = logging.getLogger(__name__)


class MacroSanSSHPool(SSHPool):
    def create(self):
        ssh = paramiko.SSHClient()
        try:
            if self.ssh_pub_key is None:
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            else:
                host_key = '%s %s %s' % \
                           (self.ssh_host, self.ssh_pub_key_type,
                            self.ssh_pub_key)
                self.set_host_key(host_key, ssh)
            ssh.connect(hostname=self.ssh_host, port=self.ssh_port,
                        username=self.ssh_username,
                        password=cryptor.decode(self.ssh_password),
                        timeout=self.ssh_conn_timeout,
                        banner_timeout=self.ssh_conn_timeout)
            transport = ssh.get_transport()
            transport.set_keepalive(self.ssh_conn_timeout)
            return ssh
        except Exception as e:
            err = six.text_type(e)
            LOG.error(err)
            if 'timed out' in err:
                raise exception.InvalidIpOrPort()
            elif 'No authentication methods available' in err \
                    or 'Authentication failed' in err:
                raise exception.InvalidUsernameOrPassword()
            elif 'not a valid RSA private key file' in err:
                raise exception.InvalidPrivateKey()
            elif 'not found in known_hosts' in err:
                raise exception.SSHNotFoundKnownHosts(self.ssh_host)
            else:
                raise exception.SSHException(err)
    def do_exec_shell(self, command_list, sleep_time=0.5):
        result = ''
        try:
            with self.item() as ssh:
                if command_list and ssh:
                    channel = ssh.invoke_shell()
                    for command in command_list:
                        utils.check_ssh_injection(command)
                        channel.send(command + '\n')
                        time.sleep(sleep_time)
                    channel.send("exit" + "\n")
                    channel.close()
                    while True:
                        resp = channel.recv(9999).decode('utf8')
                        if not resp:
                            break
                        result += resp
                    if 'is not a recognized command' in result:
                        raise exception.InvalidIpOrPort()
        except paramiko.AuthenticationException as ae:
            LOG.error('doexec Authentication error:{}'.format(ae))
            raise exception.InvalidUsernameOrPassword()
        except Exception as e:
            err = six.text_type(e)
            LOG.error(err)
            if 'timed out' in err \
                    or 'SSH connect timeout' in err:
                raise exception.SSHConnectTimeout()
            elif 'No authentication methods available' in err \
                    or 'Authentication failed' in err \
                    or 'Invalid username or password' in err:
                raise exception.InvalidUsernameOrPassword()
            elif 'not a valid RSA private key file' in err \
                    or 'not a valid RSA private key' in err:
                raise exception.InvalidPrivateKey()
            elif 'Unable to connect to port' in err \
                    or 'Invalid ip or port' in err:
                raise exception.InvalidIpOrPort()
            else:
                raise exception.SSHException(err)
        return result
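
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the repository): MacroSanSSHPool is driven
# with a list of shell commands that are replayed through one invoke_shell()
# session. The access dict below is a placeholder and assumes the password is
# stored in the encoded form that delfin's cryptor.decode() expects.
#
#     pool = MacroSanSSHPool(ssh={'host': '192.0.2.10', 'port': 22,
#                                 'username': 'admin',
#                                 'password': '<encoded-password>'})
#     # Enter the odsp shell, then run one management query inside it.
#     output = pool.do_exec_shell(['/odsp/scripts/odsp_sh.sh',
#                                  'system mgt query'])
# ---------------------------------------------------------------------------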

================================================
FILE: delfin/drivers/macro_san/ms/ms_handler.py
================================================
# Copyright 2022 The SODA Authors.
# Copyright (c) 2022 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import codecs
import csv
import datetime
import hashlib
import os
import re
import shutil
import tarfile
import time

import six
import xlrd
from oslo_log import log
from oslo_utils import units

from delfin import exception
from delfin.common import constants
from delfin.drivers.macro_san.ms import consts
from delfin.drivers.macro_san.ms.consts import digital_constant
from delfin.drivers.macro_san.ms.macro_ssh_client import MacroSanSSHPool
from delfin.drivers.utils.tools import Tools

LOG = log.getLogger(__name__)


class MsHandler(object):
    def __init__(self, **kwargs):
        self.ssh_pool = MacroSanSSHPool(**kwargs)
        ssh_access = kwargs.get('ssh')
        self.ssh_host = ssh_access.get('host')
        self.down_lock = True

    def login(self):
        res = ''
        try:
            res = self.ssh_pool.do_exec_shell([consts.ODSP_SH])
        except Exception as e:
            LOG.error('Failed to ssh login macro_san %s' % (
                six.text_type(e)))
        if consts.UNKNOWN_COMMAND_TAG in res:
            try:
                self.ssh_pool.do_exec_shell([consts.SYSTEM_QUERY])
                self.down_lock = False
            except Exception as e:
                LOG.error('Failed to cli login macro_san %s' % (
                    six.text_type(e)))
                raise e

    def get_storage(self, storage_id):
        storage_data_map = self.get_data_query(consts.SYSTEM_QUERY)
        if not storage_data_map:
            raise exception.SSHException('The command returns empty data')
        device_uuid = storage_data_map.get('DeviceUUID')
        serial_number = '{}:{}'.format(self.ssh_host, device_uuid)
        storage_name = storage_data_map.get('DeviceName')
        firmware_version = self.get_firmware_version()
        pools = self.list_storage_pools(storage_id)
        total_capacity = digital_constant.ZERO_INT
        used_capacity = digital_constant.ZERO_INT
        for pool in pools:
            total_capacity += pool.get('total_capacity')
            used_capacity += pool.get('used_capacity')
        disks = self.list_disks(storage_id)
        raw_capacity = digital_constant.ZERO_INT
        for disk in disks:
            raw_capacity += disk.get('capacity')
        storage_status = self.get_storage_status(storage_id)
        model = self.get_storage_model(storage_id)
        storage = {
            'name': storage_name if storage_name else device_uuid,
            'vendor': consts.STORAGE_VENDOR,
            'status': storage_status,
            'model': model,
            'serial_number': serial_number,
            'firmware_version': firmware_version,
            'raw_capacity': raw_capacity,
            'total_capacity': total_capacity,
            'used_capacity': used_capacity,
            'free_capacity': total_capacity - used_capacity
        }
        return storage

    def get_storage_model(self, storage_id):
        storage_model = ''
        if not self.down_lock:
            return storage_model
        local_path = self.download_model_file(storage_id)
        if local_path:
            try:
                storage_model = self.analysis_model_file(local_path,
                                                         storage_model)
            finally:
                shutil.rmtree(local_path)
        return storage_model

    @staticmethod
    def analysis_model_file(local_path, storage_model):
        list_dir = os.listdir(local_path)
        for dir_name in list_dir:
            excel = xlrd.open_workbook('{}/{}'.format(local_path, dir_name))
            sheet = excel[consts.digital_constant.ZERO_INT]
            rows_data_list = sheet.row_values(
                consts.digital_constant.ONE_INT)
            for rows_data in rows_data_list:
                title_pattern = re.compile(
                    consts.STORAGE_INFO_MODEL_REGULAR)
                title_search_obj = title_pattern.search(rows_data)
                if title_search_obj:
                    storage_model = rows_data
                    break
        return storage_model

    def download_model_file(self, storage_id):
        sftp = None
        local_path = ''
        try:
            ssh = self.ssh_pool.create()
            sftp = ssh.open_sftp()
            file_name_list = sftp.listdir(consts.FTP_PATH_TMP)
            for file_name in file_name_list:
                title_pattern = re.compile(consts.STORAGE_INFO_REGULAR)
                title_search_obj = title_pattern.search(file_name)
                if title_search_obj:
                    os_path = os.getcwd()
                    localtime = int(time.mktime(time.localtime())) * units.k
                    local_path = consts.MODEL_PATH.format(
                        os_path, storage_id, localtime)
                    os.mkdir(local_path)
                    local_path_file = '{}/{}'.format(local_path, file_name)
                    sftp.get(consts.FTP_PATH_FILE.format(file_name),
                             local_path_file)
                    break
        except Exception as e:
            LOG.error('Failed to download storage model file macro_san %s' %
                      (six.text_type(e)))
        if sftp:
            sftp.close()
        return local_path

    def get_firmware_version(self):
        firmware_version = None
        version_map = self.get_storage_version()
        for sp_num in range(
                consts.digital_constant.ONE_INT,
                len(version_map) + consts.digital_constant.ONE_INT):
            sp_key = '{}{}'.format(consts.SP, sp_num)
            firmware_version = \
                version_map.get(sp_key, {}).get('{}{}'.format(
                    sp_key, consts.ODSP_MSC_VERSION_KEY))
            if consts.FIELDS_INITIATOR_HOST != firmware_version:
                break
        return firmware_version

    def get_storage_status(self, storage_id):
        storage_status = constants.StorageStatus.NORMAL
        ha_status_map = self.get_data_query(consts.HA_STATUS)
        ha_status = ha_status_map.get('SystemHAStatus')
        if ha_status:
            storage_status = consts.STORAGE_STATUS_MAP.get(
                ha_status.lower(), constants.StorageStatus.UNKNOWN)
        else:
            controllers_list = self.list_controllers(storage_id)
            for controllers in controllers_list:
                controllers_status = controllers.get('status')
                if controllers_status in constants.ControllerStatus.FAULT:
                    storage_status = constants.StorageStatus.ABNORMAL
        return storage_status

    def list_storage_pools(self, storage_id):
        pool_list = []
        pools = self.get_data_list(consts.POOL_LIST, consts.FIELDS_NAME)
        for pool in pools:
            pool_name = pool.get('Name')
            health_status = self.get_pool_status(pool_name)
            total_capacity = Tools.get_capacity_size(
                pool.get('AllCapacity'))
            used_capacity = Tools.get_capacity_size(
                pool.get('UsedCapacity'))
            pool_model = {
                'name': pool_name,
                'storage_id': storage_id,
                'native_storage_pool_id': pool_name,
                'status': health_status,
                'storage_type': constants.StorageType.BLOCK,
                'total_capacity': total_capacity,
                'used_capacity': used_capacity,
                'free_capacity': total_capacity - used_capacity
            }
            pool_list.append(pool_model)
        return pool_list

    def get_pool_status(self, pool_name):
        raids = self.get_data_list(consts.RAID_LIST.format(pool_name),
                                   consts.FIELDS_NAME)
        pool_status = constants.StoragePoolStatus.UNKNOWN
        if raids:
            pool_status = constants.StoragePoolStatus.NORMAL
            for raid in raids:
                health_status = raid.get('HealthStatus').lower() \
                    if raid.get('HealthStatus') else None
                if health_status in consts.POOL_STATUS_ABNORMAL.ALL:
                    pool_status = constants.StoragePoolStatus.ABNORMAL
                    break
                if health_status == constants.StoragePoolStatus.DEGRADED:
                    pool_status = constants.StoragePoolStatus.DEGRADED
                    break
                if health_status not in consts.POOL_STATUS_NORMAL.ALL:
                    pool_status = constants.StoragePoolStatus.UNKNOWN
        return pool_status

    def list_volumes(self, storage_id):
        volume_list = []
        pool_volumes = self.get_volumes(storage_id)
        for volume in pool_volumes:
            status = volume.get('HealthStatus').lower() \
                if volume.get('HealthStatus') else None
            total_capacity = self.get_total_capacity(volume)
            thin_provisioning = volume.get('Thin-Provisioning').lower() \
                if volume.get('Thin-Provisioning') else None
            used_capacity = self.get_used_capacity(thin_provisioning,
                                                   total_capacity, volume)
            volume_model = {
                'name': volume.get('Name'),
                'storage_id': storage_id,
                'status': consts.LIST_VOLUMES_STATUS_MAP.get(
                    status, constants.StorageStatus.UNKNOWN),
                'native_volume_id': volume.get('Name'),
                'native_storage_pool_id': volume.get('Owner(Pool)'),
                'type': consts.VOLUME_TYPE_MAP.get(
                    thin_provisioning, constants.VolumeType.THICK),
                'wwn': volume.get('DeviceID')
                if volume.get('DeviceID') else volume.get('WWN'),
                'total_capacity': total_capacity,
                'used_capacity': used_capacity,
                'free_capacity': total_capacity - used_capacity
            }
            volume_list.append(volume_model)
        return volume_list

    @staticmethod
    def get_used_capacity(thin_provisioning, total_capacity, volume):
        if consts.FIELDS_ENABLE == thin_provisioning:
            used_capacity_str = volume.get('Thin-LUNUsedCapacity')
            number_b = used_capacity_str.index('B')
            used_capacity = \
                used_capacity_str[:number_b +
                                  consts.digital_constant.ONE_INT]
            used_capacity = Tools.get_capacity_size(used_capacity)
        else:
            used_capacity = total_capacity
        return used_capacity

    @staticmethod
    def get_total_capacity(volume):
        total_size = volume.get('TotalSize')
        if not total_size:
            physical_size = volume.get('TotalPhysicalSize')
            number_b = physical_size.index('B')
            total_size = \
                physical_size[:number_b + consts.digital_constant.ONE_INT]
        total_capacity = Tools.get_capacity_size(total_size)
        return total_capacity
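    # Illustrative note (not from the original source): for a thin LUN the
    # CLI reports something like 'Thin-LUNUsedCapacity: 10.00GB(...)'
    # (hypothetical sample); get_used_capacity() above keeps everything up to
    # and including the first 'B' -> '10.00GB' and hands that to
    # Tools.get_capacity_size() for byte conversion. Thick LUNs simply reuse
    # the total capacity.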
    def list_controllers(self, storage_id):
        controllers_list = []
        sp_map = self.get_storage_version()
        cpu_map = self.get_cup_information()
        ha_status_map = self.get_data_query(consts.HA_STATUS)
        for sp_name in sp_map.keys():
            status_key = '{}{}'.format(sp_name, consts.HA_RUNNING_STATUS)
            status = ha_status_map.get(status_key).lower() \
                if ha_status_map.get(status_key) else None
            soft_version = sp_map.get(sp_name, {}).get(
                '{}{}'.format(sp_name, consts.ODSP_MSC_VERSION_KEY))
            cpu_vendor_id = cpu_map.get(sp_name, {}).get(
                '{}{}'.format(sp_name, consts.PROCESSOR_VENDOR_KEY))
            cpu_frequency = cpu_map.get(sp_name, {}).get(
                '{}{}'.format(sp_name, consts.PROCESSOR_FREQUENCY_KEY))
            cpu_info = ''
            if cpu_vendor_id and cpu_frequency:
                cpu_info = '{}@{}'.format(cpu_vendor_id, cpu_frequency)
            controller_model = {
                'name': sp_name,
                'storage_id': storage_id,
                'native_controller_id': sp_name,
                'status': consts.CONTROLLERS_STATUS_MAP.get(
                    status, constants.ControllerStatus.UNKNOWN),
                'location': sp_name,
                'soft_version': soft_version,
                'cpu_info': cpu_info
            }
            if cpu_info:
                controller_model['cpu_count'] = \
                    consts.digital_constant.ONE_INT
            controllers_list.append(controller_model)
        return controllers_list

    def get_cup_information(self):
        # note: 'cup' is this method's historical spelling of CPU
        cpu_res = self.do_exec(consts.SYSTEM_CPU)
        sp_map = {}
        if cpu_res:
            cpu_res_list = cpu_res.strip(). \
                replace('\r', '').split('\n')
            sp_cpu_map = {}
            sp = None
            bag = True
            for row_cpu in (cpu_res_list or []):
                row_pattern = re.compile(consts.SYSTEM_CPU_SP_REGULAR)
                row_search = row_pattern.search(row_cpu)
                if row_search:
                    bag = False
                    sp = row_cpu.replace(
                        consts.LEFT_HALF_BRACKET, '').replace(
                        consts.CPU_INFORMATION_BRACKET, '').replace(' ', '')
                if bag:
                    continue
                if consts.COLON in row_cpu:
                    row_version_list = row_cpu.replace(' ', '').split(
                        consts.COLON, digital_constant.ONE_INT)
                    key = row_version_list[digital_constant.ZERO_INT]
                    sp_cpu_map[key] = row_version_list[
                        digital_constant.ONE_INT]
                if not row_cpu:
                    sp_map[sp] = sp_cpu_map
                    sp_cpu_map = {}
                    sp = None
        return sp_map

    def list_disks(self, storage_id):
        disk_list = []
        disks = self.get_disks()
        for disk in disks:
            disk_name = disk.get('Name')
            physical = disk.get('Type').lower() if disk.get('Type') else None
            logical = disk.get('Role').lower() if disk.get('Role') else None
            status = disk.get('HealthStatus').lower() if \
                disk.get('HealthStatus') else None
            disk_model = {
                'name': disk_name,
                'storage_id': storage_id,
                'native_disk_id': disk_name,
                'serial_number': disk.get('SerialNumber'),
                'manufacturer': disk.get('Vendor'),
                'model': disk.get('Model'),
                'firmware': disk.get('FWVersion'),
                'location': disk_name,
                'speed': int(disk.get('RPMs')) if disk.get('RPMs') else '',
                'capacity': Tools.get_capacity_size(disk.get('Capacity')),
                'status': consts.DISK_STATUS_MAP.get(
                    status, constants.DiskStatus.NORMAL),
                'physical_type': consts.DISK_PHYSICAL_TYPE_MAP.get(
                    physical, constants.DiskPhysicalType.UNKNOWN),
                'logical_type': consts.DISK_LOGICAL_TYPE_MAP.get(
                    logical, constants.DiskLogicalType.UNKNOWN)
            }
            disk_list.append(disk_model)
        return disk_list

    def list_ports(self, storage_id):
        ports = self.get_fc_port_encapsulation(storage_id)
        ports.extend(self.get_sas_port_data(storage_id))
        return ports

    def get_fc_port_encapsulation(self, storage_id):
        ports = []
        fc_port_map = self.get_fc_port()
        for fc_port_id in fc_port_map.keys():
            fc_port_id_upper = fc_port_id.upper()
            port_type = self.get_port_type(fc_port_id.lower())
            fc_ports = fc_port_map.get(fc_port_id)
            status_int = fc_ports.get('onlinestate')
            native_parent_id = '{}{}'.format(
                consts.SP, self.numbers_character(fc_port_id))
            fc_port_m = {
                'native_port_id': fc_port_id_upper,
                'name': fc_port_id_upper,
                'type': port_type,
                'logical_type': constants.PortLogicalType.PHYSICAL,
                'connection_status': consts.PORT_CONNECTION_STATUS_MAP.get(
                    status_int, constants.PortConnectionStatus.UNKNOWN),
                'health_status': constants.PortHealthStatus.UNKNOWN,
                'location': fc_port_id_upper,
                'storage_id': storage_id,
                'native_parent_id': native_parent_id,
                'speed': Tools.get_capacity_size(fc_ports.get('actualspeed')),
                'wwn': fc_ports.get('wwn')
            }
            ports.append(fc_port_m)
        return ports
    @staticmethod
    def parse_alert(alert):
        try:
            if consts.PARSE_ALERT_DESCRIPTION in alert.keys():
                alert_name = alert.get(consts.PARSE_ALERT_NAME)
                alert_name_e = alert_name.lower()
                alert_name_c = consts.ALERT_NAME_CONFIG.get(
                    alert_name_e, alert_name)
                alert_model = dict()
                description = alert.get(consts.PARSE_ALERT_DESCRIPTION)\
                    .encode('iso-8859-1').decode('gbk')
                alert_model['alert_id'] = alert.get(
                    consts.PARSE_ALERT_ALERT_ID)
                alert_model['severity'] = consts.PARSE_ALERT_SEVERITY_MAP.get(
                    alert.get(consts.PARSE_ALERT_SEVERITY),
                    constants.Severity.NOT_SPECIFIED)
                alert_model['category'] = constants.Category.FAULT
                alert_model['occur_time'] = Tools().time_str_to_timestamp(
                    alert.get(consts.PARSE_ALERT_TIME), consts.TIME_PATTERN)
                alert_model['description'] = description
                alert_model['location'] = '{}:{}'.format(
                    alert.get(consts.PARSE_ALERT_STORAGE),
                    alert.get(consts.PARSE_ALERT_LOCATION))
                alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
                alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE
                alert_model['alert_name'] = alert_name_c
                match_key = '{}{}'.format(alert_name_c, description)
                alert_model['match_key'] = hashlib.md5(
                    match_key.encode()).hexdigest()
                return alert_model
        except Exception as e:
            err_msg = "Failed to parse alert from " \
                      "macro_san ms: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_storage_host_initiators(self, storage_id):
        initiators_list = []
        initiators = self.get_data_list(
            consts.CLIENT_INITIATOR_GETLIST, consts.FIELDS_INITIATOR_ALIAS)
        for initiator in initiators:
            host_name = initiator.get('MappedClient') \
                if initiator.get('MappedClient') else initiator.get(
                    'MappedHost')
            wwn = initiator.get('InitiatorWWN')
            online_status = initiator.get('OnlineStatus').lower() \
                if initiator.get('OnlineStatus') else None
            initiator_type = initiator.get('Type').lower() \
                if initiator.get('Type') else None
            initiator_d = {
                'native_storage_host_initiator_id': wwn,
                'name': wwn,
                'alias': initiator.get('InitiatorAlias'),
                'type': consts.INITIATOR_TYPE_MAP.get(
                    initiator_type, constants.InitiatorType.UNKNOWN),
                'status': consts.INITIATOR_STATUS_MAP.get(
                    online_status, constants.InitiatorStatus.UNKNOWN),
                'wwn': wwn,
                'storage_id': storage_id
            }
            if consts.FIELDS_INITIATOR_HOST != host_name:
                initiator_d['native_storage_host_id'] = host_name
            initiators_list.append(initiator_d)
        return initiators_list

    def list_storage_hosts_old(self, storage_id):
        host_list = []
        initiators_host_relation = self.get_initiators_host_relation()
        hosts = self.get_data_list(consts.CLIENT_LIST, consts.FIELDS_NAME, '')
        for host in hosts:
            host_name = host.get('Name')
            initiators = initiators_host_relation.get(host_name)
            os_type = constants.HostOSTypes.UNKNOWN
            if initiators:
                os_str = initiators.get('OS').lower() \
                    if initiators.get('OS') else None
                os_type = consts.HOST_OS_TYPES_MAP.get(
                    os_str, constants.HostOSTypes.UNKNOWN)
            host_d = {
                'name': host_name,
                'storage_id': storage_id,
                'native_storage_host_id': host_name,
                'os_type': os_type,
                'status': constants.HostStatus.NORMAL,
                'description': host.get('Description')
            }
            host_list.append(host_d)
        return host_list

    def list_storage_hosts_new(self, storage_id):
        hosts_new = self.get_data_list(consts.CLIENT_HOST,
                                       consts.FIELDS_HOST_NAME, '')
        host_list = []
        for host in hosts_new:
            host_name = host.get('Host Name')
            os = host.get('OS').lower() if host.get('OS') else None
            host_d = {
                'name': host_name,
                'storage_id': storage_id,
                'native_storage_host_id': host_name,
                'os_type': consts.HOST_OS_TYPES_MAP.get(
                    os, constants.HostOSTypes.UNKNOWN),
                'status': constants.HostStatus.NORMAL,
                'description': host.get('Description')
            }
            if consts.FIELDS_INITIATOR_HOST != host.get('IP Address'):
                host_d['ip_address'] = host.get('IP Address')
            host_list.append(host_d)
        return host_list
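# parse_alert() deduplicates alerts by hashing the normalized alert name
# together with the GBK-decoded description. A minimal sketch of that key
# derivation, with made-up sample values:
import hashlib

alert_name = 'PowerSupplyFailure'         # assumed normalized alert name
description = 'PSU 1 on SP1 has failed'   # assumed decoded description
match_key = hashlib.md5(
    '{}{}'.format(alert_name, description).encode()).hexdigest()
print(match_key)   # identical alerts hash to the same stable key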
    def list_storage_host_groups(self, storage_id):
        host_groups = self.get_data_list(consts.HOST_GROUP,
                                         consts.FIELDS_HOST_GROUP_NAME, '')
        storage_host_groups = []
        host_grp_relation_list = []
        for host_group in host_groups:
            host_group_name = host_group.get('Host Group Name')
            host_g = {
                'name': host_group_name,
                'storage_id': storage_id,
                'native_storage_host_group_id': host_group_name,
                'description': host_group.get('Description')
            }
            storage_host_groups.append(host_g)
            hosts = self.get_data_list(
                consts.HOST_GROUP_N.format(host_group_name),
                consts.FIELDS_HOST_NAME_TWO)
            for host in hosts:
                host_name = host.get('HostName')
                host_group_relation = {
                    'storage_id': storage_id,
                    'native_storage_host_group_id': host_group_name,
                    'native_storage_host_id': host_name
                }
                host_grp_relation_list.append(host_group_relation)
        result = {
            'storage_host_groups': storage_host_groups,
            'storage_host_grp_host_rels': host_grp_relation_list
        }
        return result

    def list_volume_groups(self, storage_id):
        volume_groups = self.get_data_list(consts.VOLUME_GROUP,
                                           consts.FIELDS_VOLUME_GROUP_NAME,
                                           '')
        volume_group_list = []
        volume_grp_relation_list = []
        for volume_group in volume_groups:
            volume_group_name = volume_group.get('LUN Group Name')
            volume_g = {
                'name': volume_group_name,
                'storage_id': storage_id,
                'native_volume_group_id': volume_group_name,
                'description': volume_group.get('Description')
            }
            volume_group_list.append(volume_g)
            volumes = self.get_data_list(
                consts.VOLUME_GROUP_N.format(volume_group_name),
                consts.FIELDS_LUN_NAME)
            for volume in volumes:
                volume_name = volume.get('LUNName')
                volume_group_relation = {
                    'storage_id': storage_id,
                    'native_volume_group_id': volume_group_name,
                    'native_volume_id': volume_name
                }
                volume_grp_relation_list.append(volume_group_relation)
        result = {
            'volume_groups': volume_group_list,
            'vol_grp_vol_rels': volume_grp_relation_list
        }
        return result

    def list_masking_views_old(self, storage_id):
        views = []
        hosts = self.get_data_list(consts.CLIENT_LIST, consts.FIELDS_NAME)
        for host in hosts:
            host_name = host.get('Name')
            masking_list = self.get_data_list(
                consts.SHARE_LUN_LIST.format(host_name),
                consts.FIELDS_LUN_NAME)
            for masking_object in masking_list:
                volume_id = masking_object.get('LUNID')
                native_masking_view_id = '{}{}'.format(host_name, volume_id)
                view = {
                    'native_masking_view_id': native_masking_view_id,
                    'name': native_masking_view_id,
                    'native_storage_host_id': host_name,
                    'native_volume_id': volume_id,
                    'storage_id': storage_id
                }
                views.append(view)
        return views

    def list_masking_views_new(self, storage_id):
        views = self.get_data_list(consts.MAPVIEW,
                                   consts.FIELDS_MAPVIEW_NAME, '')
        views_list = []
        for view in views:
            mapview_name = view.get('Mapview Name')
            view_d = {
                'native_masking_view_id': mapview_name,
                'name': mapview_name,
                'native_storage_host_group_id': view.get('Host Group Name'),
                'native_volume_group_id': view.get('LUN Group Name'),
                'description': view.get('Description'),
                'storage_id': storage_id
            }
            views_list.append(view_d)
        return views_list

    def do_exec(self, command_str, sleep_time=0.5,
                mix_time=consts.TIME_LIMIT):
        if self.down_lock:
            try:
                res = self.ssh_pool.do_exec_shell(
                    [consts.ODSP_SH, command_str], sleep_time)
            except Exception as e:
                LOG.error('ssh Command(%s) execution info: %s' % (
                    command_str, six.text_type(e)))
                raise e
        else:
            try:
                res = self.ssh_pool.do_exec_shell([command_str], sleep_time)
            except Exception as e:
                LOG.error('cli Command(%s) execution info: %s' % (
                    command_str, six.text_type(e)))
                raise e
        if consts.FAILED_TAG in res or consts.UNKNOWN_COMMAND_TAG in res:
            return None
        if consts.SUCCESSFUL_TAG not in res:
            LOG.info('Command(%s) sleep(%s) return info: %s' %
                     (command_str, sleep_time, res))
            if sleep_time > mix_time:
                return None
            res = self.do_exec(command_str, sleep_time + 2, mix_time)
        return res
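# do_exec() retries a command that has not yet printed the success tag,
# waiting 2 s longer on each attempt until the time budget is exhausted.
# An equivalent iterative sketch of that back-off; run_once() is a
# hypothetical stand-in for the SSH round-trip, and the success tag string
# is an assumption:
def retry_until_success(run_once, sleep_time=0.5, budget=30):
    while True:
        res = run_once(sleep_time)
        if res and 'Command successful' in res:   # assumed success tag
            return res
        if sleep_time > budget:
            return None
        sleep_time += 2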
    def get_data_query(self, command):
        data_map = {}
        res = self.do_exec(command)
        if res is not None:
            row_res_list = res.strip().replace('\r', '').split('\n')
            for row_res in (row_res_list or []):
                if consts.COLON not in row_res:
                    continue
                row_data_list = row_res.replace(' ', '').split(
                    consts.COLON, digital_constant.ONE_INT)
                key = row_data_list[digital_constant.ZERO_INT]
                data_map[key] = row_data_list[digital_constant.ONE_INT]
        return data_map

    def get_storage_version(self):
        version_res = self.do_exec(consts.SYSTEM_VERSION)
        sp_map = {}
        if version_res:
            version_res_list = version_res.strip(). \
                replace('\r', '').split('\n')
            sp_version_map = {}
            sp = None
            bag = True
            for row_version in (version_res_list or []):
                row_pattern = re.compile(consts.SYSTEM_VERSION_SP_REGULAR)
                row_search = row_pattern.search(row_version)
                if row_search:
                    bag = False
                    sp = row_version.replace(
                        consts.LEFT_HALF_BRACKET, '').replace(
                        consts.AFTER_HALF_BRACKET, '').replace(' ', '')
                if bag:
                    continue
                if consts.COLON in row_version:
                    row_version_list = row_version.replace(' ', '').split(
                        consts.COLON, digital_constant.ONE_INT)
                    key = row_version_list[digital_constant.ZERO_INT]
                    sp_version_map[key] = row_version_list[
                        digital_constant.ONE_INT]
                    if consts.ODSP_DRIVER_VERSION_KEY in key:
                        sp_map[sp] = sp_version_map
                        sp_version_map = {}
        return sp_map

    def get_data_list(self, command, contains_fields, space=' ',
                      sleep_time=0.5, mix_time=consts.TIME_LIMIT):
        data_list = []
        res = self.do_exec(command, sleep_time, mix_time)
        if res:
            res_list = res.strip().replace('\r', '').split('\n\n')
            for object_str in (res_list or []):
                object_str = object_str.replace(space, '')
                if contains_fields not in object_str:
                    continue
                object_list = object_str.split('\n')
                data_map = {}
                for row_str in (object_list or []):
                    if consts.COLON not in row_str:
                        continue
                    row_list = row_str.split(
                        consts.COLON, digital_constant.ONE_INT)
                    key = row_list[digital_constant.ZERO_INT].strip()
                    data_map[key] = row_list[digital_constant.ONE_INT].strip()
                data_list.append(data_map)
        return data_list

    def get_volumes(self, storage_id):
        pools = self.list_storage_pools(storage_id)
        volumes = []
        for pool in pools:
            pool_name = pool.get('name')
            lun_list = self.get_data_list(
                consts.LUN_LIST.format(pool_name), consts.FIELDS_NAME)
            for lun in lun_list:
                lun_name = lun.get('Name')
                lun_query = self.get_data_query(
                    consts.LUN_QUERY.format(lun_name))
                if lun_query:
                    volumes.append(lun_query)
        return volumes

    def get_disks(self):
        disk_list = []
        dsu_list = self.get_data_list(consts.DSU_LIST, consts.FIELDS_NAME)
        for dsu in dsu_list:
            dsu_name = dsu.get('Name')
            if not dsu_name:
                continue
            dsu_id = dsu_name.replace(consts.DSU, '')
            disks = self.get_data_list(
                consts.DISK_LIST.format(dsu_id), consts.FIELDS_NAME)
            for disk in disks:
                disk_name = disk.get('Name')
                if not disk_name:
                    continue
                disk_id = disk_name.replace(consts.DISK, '')
                disk_map = self.get_data_query(
                    consts.DISK_QUERY.format(disk_id))
                if disk_map:
                    disk_list.append(disk_map)
        return disk_list
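# get_data_list() splits CLI output into blank-line-separated blocks and each
# block into 'Key : Value' rows. A standalone sketch of that parsing over a
# fabricated two-pool listing (the field names mirror the ones the handler
# reads; the raw text itself is an assumption about the CLI format):
raw = ('Name: Pool-1\nAllCapacity: 10.0TB\nUsedCapacity: 2.0TB\n\n'
       'Name: Pool-2\nAllCapacity: 5.0TB\nUsedCapacity: 1.0TB')
records = []
for block in raw.split('\n\n'):
    if 'Name' not in block:
        continue
    record = {}
    for row in block.split('\n'):
        if ':' not in row:
            continue
        key, value = row.split(':', 1)
        record[key.strip()] = value.strip()
    records.append(record)
print(records)   # [{'Name': 'Pool-1', ...}, {'Name': 'Pool-2', ...}]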
    def get_fc_port(self):
        target_port_res = self.do_exec(consts.TARGET_QUERY_PORT_LIST)
        fc_port = {}
        if target_port_res:
            bag = True
            port_id = None
            port_map = {}
            target_port_list = target_port_res.replace('\r', '').split('\n')
            for port_row_str in target_port_list:
                port_row_str = port_row_str.replace(' ', '')
                row_pattern = re.compile(consts.TARGET_PORT_REGULAR)
                row_search = row_pattern.search(port_row_str)
                if row_search:
                    if port_map:
                        fc_port[port_id] = port_map
                        port_map = {}
                    port_id = port_row_str.replace(consts.PORT, '')
                    bag = False
                    continue
                if bag:
                    continue
                if consts.COLON in port_row_str:
                    port_row_list = port_row_str.split(
                        consts.COLON, digital_constant.ONE_INT)
                    port_key = port_row_list[digital_constant.ZERO_INT]
                    port_map[port_key] = port_row_list[
                        digital_constant.ONE_INT]
                if consts.PORT_SUCCESSFUL_TAG in port_row_str:
                    fc_port[port_id] = port_map
        return fc_port

    def get_sas_port_data(self, storage_id):
        sas_list = []
        try:
            ha_status_map = self.get_data_query(consts.HA_STATUS)
            for ha_status_key in ha_status_map.keys():
                if consts.SP not in ha_status_key:
                    continue
                sp_num = ha_status_key.replace(
                    consts.HA_RUNNING_STATUS, '').replace(consts.SP, '')
                dsu_list = self.get_data_list(consts.DSU_LIST,
                                              consts.FIELDS_NAME)
                for dsu in dsu_list:
                    dsu_num = self.numbers_character(dsu.get('Name'))
                    sas_data_map = self.get_sas_data_list(
                        consts.SAS_PORT_LIST.format(sp_num, dsu_num),
                        consts.FIELDS_LINK_STATUS)
                    self.get_sas_encapsulation_data(sas_data_map, sas_list,
                                                    storage_id)
        finally:
            return sas_list

    def get_sas_encapsulation_data(self, sas_data_map, sas_list, storage_id):
        for sas_port_id in sas_data_map.keys():
            sas_object_map = sas_data_map.get(sas_port_id)
            status = sas_object_map.get(
                '{} Link Status'.format(sas_port_id))
            max_speed = sas_object_map.get(
                '{} PHY Max Speed'.format(sas_port_id))
            speed = sas_object_map.get(
                '{} PHY1 Speed'.format(sas_port_id))
            native_parent_id = '{}{}'.format(
                consts.SP, self.numbers_character(sas_port_id))
            sas_port_m = {
                'native_port_id': sas_port_id,
                'name': sas_port_id,
                'type': constants.PortType.SAS,
                'logical_type': constants.PortLogicalType.PHYSICAL,
                'connection_status': consts.PORT_CONNECTION_STATUS_MAP.get(
                    status, constants.PortConnectionStatus.UNKNOWN),
                'health_status': constants.PortHealthStatus.UNKNOWN,
                'location': sas_port_id,
                'storage_id': storage_id,
                'native_parent_id': native_parent_id,
                'max_speed': self.capacity_conversion(max_speed),
                'speed': self.capacity_conversion(speed)
            }
            sas_list.append(sas_port_m)

    @staticmethod
    def capacity_conversion(capacity_str):
        capacity_int = consts.digital_constant.ZERO_INT
        if consts.GBPS in capacity_str:
            capacity_int = int(capacity_str.replace(consts.GBPS, '')) \
                * units.G
        elif consts.MBPS in capacity_str:
            # strip the matching suffix; the original stripped GBPS here,
            # which cannot parse an 'Mbps' value
            capacity_int = int(capacity_str.replace(consts.MBPS, '')) \
                * units.M
        elif consts.KBPS in capacity_str:
            # likewise, strip KBPS rather than GBPS
            capacity_int = int(capacity_str.replace(consts.KBPS, '')) \
                * units.k
        return capacity_int
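# With the matching suffix stripped, the conversion behaves as expected for
# all three unit strings ('Gbps'/'Mbps'/'Kbps' are assumed to be the consts
# values). A compact equivalent using oslo's decimal units:
from oslo_utils import units

def to_int(speed):
    for suffix, factor in (('Gbps', units.G), ('Mbps', units.M),
                           ('Kbps', units.k)):
        if suffix in speed:
            return int(speed.replace(suffix, '')) * factor
    return 0

print(to_int('12Gbps'), to_int('1500Mbps'))   # integers in bits per second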
    def get_sas_data_list(self, command, contains_fields):
        sas_data = {}
        res = self.do_exec(command)
        if res:
            res_list = res.strip().replace('\r', '').split('\n\n')
            for object_str in (res_list or []):
                if contains_fields not in object_str:
                    continue
                object_list = object_str.split('\n')
                sas_object = {}
                sas_data_key = None
                for row_str in (object_list or []):
                    if consts.COLON not in row_str:
                        continue
                    object_num = row_str.rindex(consts.COLON)
                    object_key = row_str[:object_num].strip()
                    object_num_one = object_num + \
                        consts.digital_constant.ONE_INT
                    sas_object[object_key] = row_str[object_num_one:].strip()
                    if consts.FIELDS_LINK_STATUS in row_str:
                        sas_data_num = row_str.index(' ')
                        sas_data_key = row_str[:sas_data_num]
                sas_data[sas_data_key] = sas_object
        return sas_data

    @staticmethod
    def get_port_type(fc_port_id_lower):
        if constants.PortType.FC in fc_port_id_lower:
            port_type = constants.PortType.FC
        elif constants.PortType.ISCSI in fc_port_id_lower:
            port_type = constants.PortType.ISCSI
        elif constants.PortType.SAS in fc_port_id_lower:
            port_type = constants.PortType.SAS
        elif constants.PortType.ETH in fc_port_id_lower:
            port_type = constants.PortType.ETH
        else:
            port_type = constants.PortType.OTHER
        return port_type

    @staticmethod
    def numbers_character(character_string):
        # returns the first digit found, e.g. 'SP1-FC1' -> '1'
        for character in list(character_string):
            if character.isdigit():
                return character

    def get_initiators_host_relation(self):
        initiators_host = {}
        initiators = self.get_data_list(
            consts.CLIENT_INITIATOR_GETLIST, consts.FIELDS_INITIATOR_ALIAS)
        for initiator in initiators:
            host_id = initiator.get('MappedClient')
            initiators_host[host_id] = initiator
        return initiators_host

    def collect_perf_metrics(self, storage_id, resource_metrics,
                             start_time, end_time):
        metrics = []
        if not self.down_lock:
            return metrics
        LOG.info('The system(storage_id: %s) starts to collect macro_san'
                 ' performance, start_time: %s, end_time: %s',
                 storage_id, start_time, end_time)
        resource_storage = resource_metrics.get(
            constants.ResourceType.STORAGE)
        if resource_storage:
            storage_metrics = self.get_storage_metrics(
                end_time, resource_storage, start_time, storage_id)
            metrics.extend(storage_metrics)
            LOG.info('The system(storage_id: %s) stops collecting storage'
                     ' performance, the length is: %s',
                     storage_id, len(storage_metrics))
        resource_volume = resource_metrics.get(constants.ResourceType.VOLUME)
        if resource_volume:
            volume_metrics = self.get_volume_metrics(
                end_time, resource_volume, start_time, storage_id)
            metrics.extend(volume_metrics)
            LOG.info('The system(storage_id: %s) stops collecting volume'
                     ' performance, the length is: %s',
                     storage_id, len(volume_metrics))
        file_name_map = self.get_identification()
        resource_port = resource_metrics.get(constants.ResourceType.PORT)
        if resource_port:
            sas_port_metrics = self.get_port_metrics(
                end_time, resource_port, start_time, storage_id,
                consts.SAS_PORT, consts.SASPORT_REGULAR)
            metrics.extend(sas_port_metrics)
            LOG.info('The system(storage_id: %s) stops collecting sas port'
                     ' performance, the length is: %s',
                     storage_id, len(sas_port_metrics))
            if file_name_map:
                fc_port_metrics = self.get_fc_port_metrics(
                    end_time, resource_port, start_time, storage_id,
                    file_name_map)
                metrics.extend(fc_port_metrics)
                LOG.info('The system(storage_id: %s) stops collecting fc'
                         ' port performance, the length is: %s',
                         storage_id, len(fc_port_metrics))
        resource_disk = resource_metrics.get(constants.ResourceType.DISK)
        if resource_disk and file_name_map:
            disk_metrics = self.get_disk_metrics(
                end_time, resource_disk, start_time, storage_id,
                file_name_map)
            metrics.extend(disk_metrics)
            LOG.info('The system(storage_id: %s) stops collecting disk'
                     ' performance, the length is: %s',
                     storage_id, len(disk_metrics))
        return metrics
    def get_fc_port_metrics(self, end_time, resource_disk, start_time,
                            storage_id, file_name_map):
        local_path = self.down_perf_file(consts.FC_PORT, storage_id,
                                         consts.FCPORT_REGULAR)
        disk_metrics = []
        if local_path:
            metrics_data = None
            try:
                metrics_data = self.analysis_per_file(
                    local_path, start_time, end_time, consts.FC_PORT,
                    file_name_map)
            except Exception as e:
                LOG.error('Failed to analyze fc port perf file %s' % (
                    six.text_type(e)))
            finally:
                shutil.rmtree(local_path)
            if metrics_data:
                disk_metrics = self.packaging_metrics(
                    storage_id, metrics_data, resource_disk,
                    constants.ResourceType.PORT)
        return disk_metrics

    def get_disk_metrics(self, end_time, resource_disk, start_time,
                         storage_id, file_name_map):
        local_path = self.down_perf_file(
            constants.ResourceType.DISK, storage_id, consts.DISK_REGULAR)
        disk_metrics = []
        if local_path:
            metrics_data = None
            try:
                metrics_data = self.analysis_per_file(
                    local_path, start_time, end_time,
                    constants.ResourceType.DISK, file_name_map)
            except Exception as e:
                LOG.error('Failed to analyze disk perf file %s' % (
                    six.text_type(e)))
            finally:
                shutil.rmtree(local_path)
            if metrics_data:
                disk_metrics = self.packaging_metrics(
                    storage_id, metrics_data, resource_disk,
                    constants.ResourceType.DISK)
        return disk_metrics

    def get_port_metrics(self, end_time, resource_port, start_time,
                         storage_id, folder, pattern):
        local_path = self.down_perf_file(folder, storage_id, pattern)
        sas_port_metrics = []
        if local_path:
            metrics_data = None
            try:
                metrics_data = self.analysis_per_file(
                    local_path, start_time, end_time, folder)
            except Exception as e:
                LOG.error('Failed to analyze sas port perf file %s' % (
                    six.text_type(e)))
            finally:
                shutil.rmtree(local_path)
            if metrics_data:
                sas_port_metrics = self.packaging_metrics(
                    storage_id, metrics_data, resource_port,
                    constants.ResourceType.PORT)
        return sas_port_metrics

    def get_volume_metrics(self, end_time, resource_volume, start_time,
                           storage_id):
        local_path = self.down_perf_file(
            constants.ResourceType.VOLUME, storage_id, consts.LUN_REGULAR)
        volume_metrics = []
        if local_path:
            metrics_data = None
            try:
                uuid_map = self.get_volume_uuid()
                metrics_data = self.analysis_per_file(
                    local_path, start_time, end_time,
                    constants.ResourceType.VOLUME, uuid_map)
            except Exception as e:
                LOG.error('Failed to analyze volume perf file %s' % (
                    six.text_type(e)))
            finally:
                shutil.rmtree(local_path)
            if metrics_data:
                volume_metrics = self.packaging_metrics(
                    storage_id, metrics_data, resource_volume,
                    constants.ResourceType.VOLUME)
        return volume_metrics

    def get_storage_metrics(self, end_time, resource_storage, start_time,
                            storage_id):
        local_path = self.down_perf_file(constants.ResourceType.STORAGE,
                                         storage_id, consts.STRAGE_REGULAR)
        storage_metrics = []
        if local_path:
            metrics_data = None
            try:
                metrics_data = self.analysis_per_file(
                    local_path, start_time, end_time,
                    constants.ResourceType.STORAGE)
            except Exception as e:
                LOG.error('Failed to analyze storage perf file %s' % (
                    six.text_type(e)))
            finally:
                shutil.rmtree(local_path)
            if metrics_data:
                resource_id, resource_name = self.get_storages()
                storage_metrics = self.storage_packaging_data(
                    storage_id, metrics_data, resource_storage,
                    resource_id, resource_name)
        return storage_metrics

    def get_storages(self):
        storage_data_map = self.get_data_query(consts.SYSTEM_QUERY)
        device_uuid = storage_data_map.get('DeviceUUID')
        storage_name = storage_data_map.get('DeviceName')
        resource_name = storage_name if storage_name else device_uuid
        resource_id = '{}:{}'.format(self.ssh_host, device_uuid)
        return resource_id, resource_name
    def down_perf_file(self, folder, storage_id, pattern):
        sftp = None
        tar = None
        local_path = ''
        try:
            ssh = self.ssh_pool.create()
            sftp = ssh.open_sftp()
            file_name_list = sftp.listdir(consts.FTP_PERF_PATH)
            ms_path = os.getcwd()
            localtime = int(round(time.time() * 1000))
            local_path = consts.ADD_FOLDER.format(
                ms_path, folder, storage_id, localtime)
            os.mkdir(local_path)
            for file_name in file_name_list:
                title_pattern = re.compile(pattern)
                title_search_obj = title_pattern.search(file_name)
                if title_search_obj:
                    local_path_file = '{}/{}'.format(local_path, file_name)
                    ftp_path = '{}/{}'.format(consts.FTP_PERF_PATH,
                                              file_name)
                    sftp.get(ftp_path, local_path_file)
                    if consts.CSV in file_name:
                        continue
                    tar = tarfile.open(local_path_file)
                    tar.extractall(local_path)
        except Exception as e:
            LOG.error('Failed to download perf file %s macro_san %s'
                      % (folder, six.text_type(e)))
        if sftp:
            sftp.close()
        if tar:
            tar.close()
        return local_path

    def get_identification(self):
        identification = {}
        controller = self.get_controller()
        if not controller:
            return identification
        files = self.get_data_list(
            consts.SYSTEM_PERFORMANCE_FILE, consts.FIELDS_NAME,
            sleep_time=consts.digital_constant.TWELVE_INT,
            mix_time=consts.digital_constant.SIXTY)
        for file in files:
            sp = file.get('SPName')
            file_name = file.get('FileName')
            if controller != sp or not file_name:
                continue
            identification[file_name] = file.get('ObjectName')
        return identification

    def get_controller(self):
        res = self.ssh_pool.do_exec_shell([consts.VERSION_SHOW],
                                          consts.digital_constant.ONE_INT)
        if res:
            res_list = res.strip().replace('\r', '').split('\n')
            for res in res_list:
                if consts.SPECIAL_VERSION in res:
                    controller = res.replace(' ', '').replace(
                        consts.SPECIAL_VERSION, '')
                    return controller

    def get_volume_uuid(self):
        uuid_map = {}
        pools = self.get_data_list(consts.POOL_LIST, consts.FIELDS_NAME)
        for pool in pools:
            pool_name = pool.get('Name')
            lun_list = self.get_data_list(
                consts.LUN_LIST.format(pool_name), consts.FIELDS_NAME)
            for lun in lun_list:
                lun_name = lun.get('Name')
                lun_query = self.get_data_query(
                    consts.LUN_QUERY.format(lun_name))
                uuid = lun_query.get('LUNUUID')
                uuid_map[uuid] = lun_name
        return uuid_map

    def analysis_per_file(self, local_path, start_time, end_time,
                          resource_type, uuid_map=None):
        resource_key_data = {}
        resource_key = None
        if constants.ResourceType.STORAGE == resource_type:
            resource_key = resource_type
        list_dir = os.listdir(local_path)
        data = {}
        for dir_name in list_dir:
            dir_name = dir_name.replace(' ', '')
            if consts.CSV not in dir_name:
                continue
            resource_key = self.get_resource_key(dir_name, resource_key,
                                                 resource_type, uuid_map)
            resource_data = resource_key_data.get(resource_key)
            if resource_data:
                data = resource_data
            with codecs.open('{}/{}'.format(local_path, dir_name),
                             encoding='utf-8-sig') as f:
                for row in csv.DictReader(
                        line.replace('\0', '') for line in f):
                    time_str = row.get('')
                    timestamp_s = self.get_timestamp_s(time_str)
                    timestamp_ms = timestamp_s * units.k
                    if timestamp_ms < start_time or timestamp_ms >= end_time:
                        continue
                    row_data, timestamp = self.get_perf_data(row,
                                                             timestamp_s)
                    data[timestamp] = row_data
            resource_key_data[resource_key] = data
        return resource_key_data

    @staticmethod
    def get_resource_key(dir_name, resource_key, resource_type, uuid_map):
        if constants.ResourceType.VOLUME == resource_type:
            uuid_list = dir_name.replace(consts.PERF_LUN, '').split(
                consts.PERF_SP)
            uuid = uuid_list[consts.digital_constant.ZERO_INT]
            resource_key = uuid_map.get(uuid)
        if consts.SAS_PORT == resource_type:
            uuid_list = dir_name.replace(consts.PERF_SAS_PORT, '').split(
                consts.PERF_SP)
            resource_key = uuid_list[consts.digital_constant.ZERO_INT] \
                .replace('_', ':')
        if constants.ResourceType.DISK == resource_type or \
                consts.FC_PORT == resource_type:
            resource_key = uuid_map.get(dir_name) if \
                uuid_map.get(dir_name) else \
                uuid_map.get(dir_name.replace('.csv', '.tgz'))
        return resource_key
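# analysis_per_file() keys samples by a timestamp that get_perf_data() aligns
# down to one-minute boundaries and converts to milliseconds (units.k is
# oslo's decimal kilo, 1000). The arithmetic in isolation:
from oslo_utils import units

timestamp_s = 1663315230                            # sample epoch seconds
bucket_ms = int(timestamp_s / 60) * 60 * units.k    # -> 1663315200000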
    @staticmethod
    def get_perf_data(row, timestamp_s):
        timestamp = int(timestamp_s / consts.SIXTY) * consts.SIXTY * units.k
        throughput = round(
            (int(row.get('r&w/throughput(B)')) / units.Mi), 3)
        r_throughput = round(
            (int(row.get('r/throughput(B)')) / units.Mi), 3)
        w_throughput = round(
            (int(row.get('w/throughput(B)')) / units.Mi), 3)
        response = round(
            int(row.get('r&w/avg_rsp_time(us)')) / units.k, 3)
        r_response = round(
            int(row.get('r/avg_rsp_time(us)')) / units.k, 3)
        w_response = round(
            int(row.get('w/avg_rsp_time(us)')) / units.k, 3)
        cache_hit_ratio = round(
            int(row.get('r&w/cacherate(%*100)')), 3)
        r_cache_hit_ratio = round(
            int(row.get('r/cacherate(%*100)')), 3)
        w_cache_hit_ratio = round(
            int(row.get('w/cacherate(%*100)')), 3)
        row_data = {
            constants.StorageMetric.IOPS.name: round(
                int(row.get('r&w/iops')), 3),
            constants.StorageMetric.READ_IOPS.name: round(
                int(row.get('r/iops')), 3),
            constants.StorageMetric.WRITE_IOPS.name: round(
                int(row.get('w/iops')), 3),
            constants.StorageMetric.THROUGHPUT.name: throughput,
            constants.StorageMetric.READ_THROUGHPUT.name: r_throughput,
            constants.StorageMetric.WRITE_THROUGHPUT.name: w_throughput,
            constants.StorageMetric.RESPONSE_TIME.name: response,
            constants.StorageMetric.READ_RESPONSE_TIME.name: r_response,
            constants.StorageMetric.WRITE_RESPONSE_TIME.name: w_response,
            constants.StorageMetric.CACHE_HIT_RATIO.name: cache_hit_ratio,
            constants.StorageMetric.READ_CACHE_HIT_RATIO.name:
                r_cache_hit_ratio,
            constants.StorageMetric.WRITE_CACHE_HIT_RATIO.name:
                w_cache_hit_ratio
        }
        return row_data, timestamp

    @staticmethod
    def storage_packaging_data(storage_id, metrics_data, resource_metrics,
                               resource_id, resource_name):
        metrics = []
        for resource_key in resource_metrics.keys():
            labels = {
                'storage_id': storage_id,
                'resource_type': constants.ResourceType.STORAGE,
                'resource_id': resource_id,
                'resource_name': resource_name,
                'type': 'RAW',
                'unit': resource_metrics[resource_key]['unit']
            }
            resource_value = {}
            time_key_data = metrics_data.get(constants.ResourceType.STORAGE)
            for time_key in time_key_data.keys():
                resource_key_data = time_key_data.get(time_key)
                resource_data = resource_key_data.get(resource_key)
                resource_value[time_key] = resource_data
            metrics_res = constants.metric_struct(
                name=resource_key, labels=labels, values=resource_value)
            metrics.append(metrics_res)
        return metrics

    @staticmethod
    def packaging_metrics(storage_id, metrics_data, resource_metrics,
                          resource_type):
        metrics = []
        for resource_id in metrics_data.keys():
            for resource_key in resource_metrics.keys():
                labels = {
                    'storage_id': storage_id,
                    'resource_type': resource_type,
                    'resource_id': resource_id,
                    'resource_name': resource_id,
                    'type': 'RAW',
                    'unit': resource_metrics[resource_key]['unit']
                }
                resource_value = {}
                resource_data = metrics_data.get(resource_id)
                for time_key in resource_data.keys():
                    resource_value[time_key] = \
                        resource_data.get(time_key, {}).get(resource_key)
                if resource_value:
                    metrics_res = constants.metric_struct(
                        name=resource_key, labels=labels,
                        values=resource_value)
                    metrics.append(metrics_res)
        return metrics

    @staticmethod
    def get_timestamp_s(time_str):
        timestamp_s = \
            int(datetime.datetime.strptime(
                time_str, consts.MACRO_SAN_TIME_FORMAT).timestamp())
        return timestamp_s

    def get_latest_perf_timestamp(self):
        timestamp = None
        if not self.down_lock:
            return timestamp
        res = self.ssh_pool.do_exec_shell([consts.GET_DATE])
        if res:
            res_list = res.strip().replace('\r', '').split('\n')
            for row in res_list:
                if row.isdigit():
                    timestamp = int(
                        int(row) / consts.SIXTY) * consts.SIXTY * units.k
        return timestamp
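# How a caller hands MsHandler.collect_perf_metrics() its work list: the
# resource_metrics argument maps resource types to per-metric capability
# dicts (the driver builds it from consts.STORAGE_CAP and friends, so the
# exact contents below are an illustrative assumption):
resource_metrics = {
    'storage': {'iops': {'unit': 'IOPS'},
                'throughput': {'unit': 'MB/s'}},
    'volume': {'responseTime': {'unit': 'ms'}},
}
# metrics = handler.collect_perf_metrics(storage_id, resource_metrics,
#                                        start_time_ms, end_time_ms)
# Each returned metric_struct carries labels (including 'unit') and a
# {timestamp_ms: value} series for one resource and one metric name.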
================================================
FILE: delfin/drivers/macro_san/ms/ms_stor.py
================================================
# Copyright 2022 The SODA Authors.
# Copyright (c) 2022 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log

from delfin.common import constants
from delfin.drivers import driver
from delfin.drivers.macro_san.ms import ms_handler, consts
from delfin.drivers.macro_san.ms.ms_handler import MsHandler

LOG = log.getLogger(__name__)


class MacroSanDriver(driver.StorageDriver):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.ms_handler = ms_handler.MsHandler(**kwargs)
        self.login = self.ms_handler.login()

    def get_storage(self, context):
        return self.ms_handler.get_storage(self.storage_id)

    def list_storage_pools(self, context):
        return self.ms_handler.list_storage_pools(self.storage_id)

    def list_volumes(self, context):
        return self.ms_handler.list_volumes(self.storage_id)

    def list_controllers(self, context):
        return self.ms_handler.list_controllers(self.storage_id)

    def list_disks(self, context):
        return self.ms_handler.list_disks(self.storage_id)

    def list_ports(self, context):
        return self.ms_handler.list_ports(self.storage_id)

    def list_alerts(self, context, query_para=None):
        raise NotImplementedError(
            "Macro_SAN Driver SSH list_alerts() is not Implemented")

    @staticmethod
    def parse_alert(context, alert):
        return MsHandler.parse_alert(alert)

    def clear_alert(self, context, alert):
        pass

    def remove_trap_config(self, context, trap_config):
        pass

    def add_trap_config(self, context, trap_config):
        pass

    def reset_connection(self, context, **kwargs):
        pass

    def collect_perf_metrics(self, context, storage_id, resource_metrics,
                             start_time, end_time):
        return self.ms_handler.collect_perf_metrics(
            self.storage_id, resource_metrics, start_time, end_time)

    @staticmethod
    def get_capabilities(context, filters=None):
        return {
            'is_historic': True,
            'resource_metrics': {
                constants.ResourceType.STORAGE: consts.STORAGE_CAP,
                constants.ResourceType.VOLUME: consts.VOLUME_CAP,
                constants.ResourceType.PORT: consts.PORT_CAP,
                constants.ResourceType.DISK: consts.DISK_CAP
            }
        }

    def get_latest_perf_timestamp(self, context):
        return self.ms_handler.get_latest_perf_timestamp()

    def list_storage_host_initiators(self, context):
        return self.ms_handler.list_storage_host_initiators(self.storage_id)

    def list_storage_hosts(self, context):
        host_list = self.ms_handler.list_storage_hosts_new(self.storage_id)
        if not host_list:
            host_list = self.ms_handler.list_storage_hosts_old(
                self.storage_id)
        return host_list

    def list_storage_host_groups(self, context):
        return self.ms_handler.list_storage_host_groups(self.storage_id)

    def list_volume_groups(self, context):
        return self.ms_handler.list_volume_groups(self.storage_id)

    def list_masking_views(self, context):
        views = self.ms_handler.list_masking_views_new(self.storage_id)
        if not views:
            views = self.ms_handler.list_masking_views_old(self.storage_id)
        return views
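delfin locates this class through a stevedore entry point in the
'delfin.storage.drivers' namespace, keyed as '<vendor> <model>' (see
DriverManager in the next file). A setup.cfg sketch of how such a
registration might look; the 'macro_san ms' key is an assumption based on
that naming scheme, not a quoted line from the repository:

    [entry_points]
    delfin.storage.drivers =
        macro_san ms = delfin.drivers.macro_san.ms.ms_stor:MacroSanDriver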
================================================
FILE: delfin/drivers/manager.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import six
import stevedore
import threading

from oslo_log import log

from delfin import db
from delfin import exception
from delfin import utils
from delfin import ssl_utils
from delfin.common import constants

LOG = log.getLogger(__name__)


@six.add_metaclass(utils.Singleton)
class DriverManager(stevedore.ExtensionManager):
    _instance_lock = threading.Lock()
    NAMESPACE = 'delfin.storage.drivers'

    def __init__(self):
        super(DriverManager, self).__init__(self.NAMESPACE)
        # The driver_factory will keep the driver instance for
        # each of storage systems so that the session between driver
        # and storage system is effectively used.
        self.driver_factory = dict()

    def get_driver(self, context, invoke_on_load=True,
                   cache_on_load=True, **kwargs):
        """Get a driver from manager.

        :param context: The context of delfin.
        :type context: delfin.context.RequestContext
        :param invoke_on_load: Boolean to decide whether to return the
            driver object.
        :type invoke_on_load: bool
        :param cache_on_load: Boolean to decide whether save driver object
            in driver_factory when generating a new driver object.
            It takes effect when invoke_on_load is True.
        :type cache_on_load: bool
        :param kwargs: Parameters from access_info.
        """
        context.storage_id = kwargs.get('storage_id')
        kwargs = copy.deepcopy(kwargs)
        kwargs['verify'] = False
        ca_path = ssl_utils.get_storage_ca_path()
        if ca_path:
            ssl_utils.verify_ca_path(ca_path)
            kwargs['verify'] = ca_path
        if not invoke_on_load:
            return self._get_driver_cls(**kwargs)
        else:
            return self._get_driver_obj(context, cache_on_load, **kwargs)

    def update_driver(self, storage_id, driver):
        self.driver_factory[storage_id] = driver

    def remove_driver(self, storage_id):
        """Clear driver instance from driver factory."""
        self.driver_factory.pop(storage_id, None)

    def _get_driver_obj(self, context, cache_on_load=True, **kwargs):
        if not cache_on_load or not kwargs.get('storage_id'):
            if kwargs['verify']:
                ssl_utils.reload_certificate(kwargs['verify'])
            cls = self._get_driver_cls(**kwargs)
            return cls(**kwargs)
        if kwargs['storage_id'] in self.driver_factory:
            return self.driver_factory[kwargs['storage_id']]
        with self._instance_lock:
            if kwargs['storage_id'] in self.driver_factory:
                return self.driver_factory[kwargs['storage_id']]
            if kwargs['verify']:
                ssl_utils.reload_certificate(kwargs['verify'])
            access_info = copy.deepcopy(kwargs)
            storage_id = access_info.pop('storage_id')
            access_info.pop('verify')
            if access_info:
                cls = self._get_driver_cls(**kwargs)
                driver = cls(**kwargs)
            else:
                access_info = db.access_info_get(
                    context, storage_id).to_dict()
                access_info_dict = copy.deepcopy(access_info)
                remove_fields = ['created_at', 'updated_at', 'storage_id',
                                 'storage_name', 'extra_attributes']
                # Remove unrelated query fields
                for field in remove_fields:
                    if access_info_dict.get(field):
                        access_info_dict.pop(field)
                for access in constants.ACCESS_TYPE:
                    if access_info_dict.get(access):
                        access_info_dict.pop(access)
                access_info_list = db.access_info_get_all(
                    context, filters=access_info_dict)
                for _access_info in access_info_list:
                    if _access_info['storage_id'] in self.driver_factory:
                        driver = self.driver_factory[
                            _access_info['storage_id']]
                        driver.add_storage(access_info)
                        self.driver_factory[storage_id] = driver
                        return driver
                access_info['verify'] = kwargs.get('verify')
                cls = self._get_driver_cls(**access_info)
                driver = cls(**access_info)
            self.driver_factory[storage_id] = driver
            return driver

    def _get_driver_cls(self, **kwargs):
        """Get driver class from entry points."""
        name = '%s %s' % (kwargs.get('vendor'), kwargs.get('model'))
        if name in self.names():
            return self[name].plugin
        msg = "Storage driver '%s' could not be found." % name
        LOG.error(msg)
        raise exception.StorageDriverNotFound(name)
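A short usage sketch of the manager above; the access_info keys are an
assumption modelled on the vendor/model lookup in _get_driver_cls():

    from delfin import context as delfin_context
    from delfin.drivers.manager import DriverManager

    ctx = delfin_context.RequestContext()
    manager = DriverManager()
    # Repeated calls with the same storage_id return the cached instance,
    # so the session to the backend is reused.
    driver = manager.get_driver(ctx, storage_id='storage-uuid',
                                vendor='macro_san', model='ms',
                                host='192.0.2.10', username='admin',
                                password='***')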
""" context.storage_id = kwargs.get('storage_id') kwargs = copy.deepcopy(kwargs) kwargs['verify'] = False ca_path = ssl_utils.get_storage_ca_path() if ca_path: ssl_utils.verify_ca_path(ca_path) kwargs['verify'] = ca_path if not invoke_on_load: return self._get_driver_cls(**kwargs) else: return self._get_driver_obj(context, cache_on_load, **kwargs) def update_driver(self, storage_id, driver): self.driver_factory[storage_id] = driver def remove_driver(self, storage_id): """Clear driver instance from driver factory.""" self.driver_factory.pop(storage_id, None) def _get_driver_obj(self, context, cache_on_load=True, **kwargs): if not cache_on_load or not kwargs.get('storage_id'): if kwargs['verify']: ssl_utils.reload_certificate(kwargs['verify']) cls = self._get_driver_cls(**kwargs) return cls(**kwargs) if kwargs['storage_id'] in self.driver_factory: return self.driver_factory[kwargs['storage_id']] with self._instance_lock: if kwargs['storage_id'] in self.driver_factory: return self.driver_factory[kwargs['storage_id']] if kwargs['verify']: ssl_utils.reload_certificate(kwargs['verify']) access_info = copy.deepcopy(kwargs) storage_id = access_info.pop('storage_id') access_info.pop('verify') if access_info: cls = self._get_driver_cls(**kwargs) driver = cls(**kwargs) else: access_info = db.access_info_get( context, storage_id).to_dict() access_info_dict = copy.deepcopy(access_info) remove_fields = ['created_at', 'updated_at', 'storage_id', 'storage_name', 'extra_attributes'] # Remove unrelated query fields for field in remove_fields: if access_info_dict.get(field): access_info_dict.pop(field) for access in constants.ACCESS_TYPE: if access_info_dict.get(access): access_info_dict.pop(access) access_info_list = db.access_info_get_all( context, filters=access_info_dict) for _access_info in access_info_list: if _access_info['storage_id'] in self.driver_factory: driver = self.driver_factory[ _access_info['storage_id']] driver.add_storage(access_info) self.driver_factory[storage_id] = driver return driver access_info['verify'] = kwargs.get('verify') cls = self._get_driver_cls(**access_info) driver = cls(**access_info) self.driver_factory[storage_id] = driver return driver def _get_driver_cls(self, **kwargs): """Get driver class from entry points.""" name = '%s %s' % (kwargs.get('vendor'), kwargs.get('model')) if name in self.names(): return self[name].plugin msg = "Storage driver '%s' could not be found." % name LOG.error(msg) raise exception.StorageDriverNotFound(name) ================================================ FILE: delfin/drivers/netapp/__init__.py ================================================ ================================================ FILE: delfin/drivers/netapp/dataontap/__init__.py ================================================ ================================================ FILE: delfin/drivers/netapp/dataontap/cluster_mode.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
================================================
FILE: delfin/drivers/netapp/dataontap/cluster_mode.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from delfin.drivers import driver
from delfin.drivers.netapp.dataontap import netapp_handler
from delfin.drivers.netapp.dataontap.netapp_handler import NetAppHandler


class NetAppCmodeDriver(driver.StorageDriver):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.netapp_handler = netapp_handler.NetAppHandler(**kwargs)
        self.netapp_handler.login()

    def reset_connection(self, context, **kwargs):
        self.netapp_handler.login()

    def get_storage(self, context):
        return self.netapp_handler.get_storage()

    def list_storage_pools(self, context):
        return self.netapp_handler.list_storage_pools(self.storage_id)

    def list_volumes(self, context):
        return self.netapp_handler.list_volumes(self.storage_id)

    def list_controllers(self, context):
        return self.netapp_handler.list_controllers(self.storage_id)

    def list_ports(self, context):
        return self.netapp_handler.list_ports(self.storage_id)

    def list_disks(self, context):
        return self.netapp_handler.list_disks(self.storage_id)

    def list_alerts(self, context, query_para=None):
        return self.netapp_handler.list_alerts(query_para)

    def list_qtrees(self, context):
        return self.netapp_handler.list_qtrees(self.storage_id)

    def list_quotas(self, context):
        return self.netapp_handler.list_quotas(self.storage_id)

    def list_filesystems(self, context):
        return self.netapp_handler.list_filesystems(self.storage_id)

    def list_shares(self, context):
        return self.netapp_handler.list_shares(self.storage_id)

    def add_trap_config(self, context, trap_config):
        pass

    def remove_trap_config(self, context, trap_config):
        pass

    @staticmethod
    def parse_alert(context, alert):
        return NetAppHandler.parse_alert(alert)

    def clear_alert(self, context, alert):
        return self.netapp_handler.clear_alert(alert)

    @staticmethod
    def get_access_url():
        return 'https://{ip}'

    def get_alert_sources(self, context):
        return self.netapp_handler.get_alert_sources()

    def collect_perf_metrics(self, context, storage_id, resource_metrics,
                             start_time, end_time):
        return self.netapp_handler.collect_perf_metrics(
            storage_id, resource_metrics, start_time, end_time)

    @staticmethod
    def get_capabilities(context, filters=None):
        return NetAppHandler.get_capabilities(filters)

    def get_latest_perf_timestamp(self, context):
        return self.netapp_handler.get_latest_perf_timestamp()

    def list_storage_host_initiators(self, context):
        return self.netapp_handler.\
            list_storage_host_initiators(self.storage_id)

    def list_port_groups(self, context):
        return self.netapp_handler.list_port_groups(self.storage_id)

    def list_masking_views(self, context):
        return self.netapp_handler.list_masking_views(self.storage_id)

    def list_storage_hosts(self, context):
        return self.netapp_handler.list_storage_hosts(self.storage_id)



================================================
FILE: delfin/drivers/netapp/dataontap/constants.py
================================================
# Copyright 2021 The SODA Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re

from delfin.common import constants

SOCKET_TIMEOUT = 15
AUTH_KEY = 'Authorization'

RETURN_SUCCESS_CODE = 200
CREATED_SUCCESS_CODE = 201
ACCEPTED_RETURN_CODE = 202
BAD_REQUEST_RETURN_CODE = 400
UNAUTHORIZED_RETURN_CODE = 401
FORBIDDEN_RETURN_CODE = 403
NOT_FOUND_RETURN_CODE = 404
METHOD_NOT_ALLOWED_CODE = 405
CONFLICT_RETURN_CODE = 409
INTERNAL_ERROR_CODE = 500

HOUR_STAMP = '1h'
DAY_STAMP = '1d'
MONTH_STAMP = '1m'
WEEK_STAMP = '1w'
YEAR_STAMP = '1y'

CLUSTER_PERF_URL = '/api/cluster/metrics?interval=1h&fields=iops,' \
                   'throughput,latency'
POOL_PERF_URL = '/api/storage/aggregates/%s/metrics?interval=1h&' \
                'fields=iops,throughput,latency'
VOLUME_PERF_URL = '/api/storage/luns/%s/metrics?interval=1h&fields=iops,' \
                  'throughput,latency'
FS_PERF_URL = '/api/storage/volumes/%s/metrics?interval=1h&fields=iops,' \
              'throughput,latency'
FC_PERF_URL = '/api/network/fc/ports/%s/metrics?interval=1h&fields=iops,' \
              'throughput,latency'
ETH_PERF_URL = '/api/network/ethernet/ports/%s/metrics?interval=1h&' \
               'fields=throughput'
FS_INFO_URL = '/api/storage/volumes?fields=svm'
FC_INFO_URL = '/api/network/fc/ports'
ETH_INFO_URL = '/api/network/ethernet/ports?fields=node'

PERF_MAP = {
    'iops': ['iops', 'total'],
    'readIops': ['iops', 'read'],
    'writeIops': ['iops', 'write'],
    'throughput': ['throughput', 'total'],
    'readThroughput': ['throughput', 'read'],
    'writeThroughput': ['throughput', 'write'],
    'responseTime': ['latency', 'total']
}

PATTERN = re.compile('^[-]{3,}')
FLOAT_PATTERN = r"\d\.\d"
IP_PATTERN = re.compile(r'(([01]{0,1}\d{0,1}\d|2[0-4]\d|25[0-5])\.){3}'
                        r'([01]{0,1}\d{0,1}\d|2[0-4]\d|25[0-5])$')
IQN_PATTERN = re.compile('^[i][q][n].')
WWN_PATTERN = re.compile('^(([a-z|0-9]){2}:){7}(([a-z|0-9]){2})')
INITIATOR_KEY = 'loggedin'

CLUSTER_SHOW_COMMAND = "cluster identity show"
VERSION_SHOW_COMMAND = "version"
STORAGE_STATUS_COMMAND = "system health status show"
POOLS_SHOW_DETAIL_COMMAND = "storage pool show -instance"
AGGREGATE_SHOW_DETAIL_COMMAND = "storage aggregate show -instance"
FS_SHOW_DETAIL_COMMAND = "vol show -instance"
THIN_FS_SHOW_COMMAND = "vol show -space-guarantee none"
ALTER_SHOW_DETAIL_COMMAND = "system health alert show -instance"
EVENT_SHOW_DETAIL_COMMAND = "event show -instance -severity EMERGENCY"
EVENT_TIME_TYPE = '%m/%d/%Y %H:%M:%S'
ALTER_TIME_TYPE = '%a %b %d %H:%M:%S %Y'
CLEAR_ALERT_COMMAND = \
    "system health alert delete -alerting-resource * -alert-id"
DISK_SHOW_DETAIL_COMMAND = "disk show -instance"
DISK_SHOW_PHYSICAL_COMMAND = "disk show -physical"
DISK_ERROR_COMMAND = "disk error show"
LUN_SHOW_DETAIL_COMMAND = "lun show -instance"
CONTROLLER_SHOW_DETAIL_COMMAND = "node show -instance"
PORT_SHOW_DETAIL_COMMAND = "network port show -instance"
INTERFACE_SHOW_DETAIL_COMMAND = "network interface show -instance"
FC_PORT_SHOW_DETAIL_COMMAND = "fcp adapter show -instance"
QTREE_SHOW_DETAIL_COMMAND = "qtree show -instance"
CIFS_SHARE_SHOW_DETAIL_COMMAND = "vserver cifs share show -instance" \
                                 " -vserver %(vserver_name)s"
SHARE_AGREEMENT_SHOW_COMMAND = "vserver show -fields Allowed-protocols"
VSERVER_SHOW_COMMAND = "vserver show -type data"
NFS_SHARE_SHOW_COMMAND = "volume show -junction-active true -instance"
STORAGE_VENDOR = "NetApp"
STORAGE_MODEL = "cmodel"
QUOTA_SHOW_DETAIL_COMMAND = "volume quota policy rule show -instance"
MGT_IP_COMMAND = "network interface show -fields address -role cluster-mgmt"
NODE_IP_COMMAND = "network interface show -fields address -role node-mgmt"
CONTROLLER_IP_COMMAND = "network interface show -fields " \
                        "curr-node,address -role node-mgmt"
HOST_COMMAND \
= "igroup show -instance" PORT_GROUP_COMMAND = "portset show -instance" LIF_COMMAND = "network interface show -instance" FC_INITIATOR_COMMAND = "fcp initiator show -instance" ISCSI_INITIATOR_COMMAND = "iscsi initiator show -instance" LUN_MAPPING_COMMAND = 'lun mapping show -instance' SECURITY_STYLE = { 'mixed': constants.NASSecurityMode.MIXED, 'ntfs': constants.NASSecurityMode.NTFS, 'unix': constants.NASSecurityMode.UNIX } STORAGE_STATUS = { 'ok': constants.StorageStatus.NORMAL, 'ok-with-suppressed': constants.StorageStatus.NORMAL, 'degraded': constants.StorageStatus.DEGRADED, 'unreachable': constants.StorageStatus.ABNORMAL, 'unknown': constants.StorageStatus.ABNORMAL } AGGREGATE_STATUS = { 'online': constants.StoragePoolStatus.NORMAL, 'creating': constants.StoragePoolStatus.NORMAL, 'mounting': constants.StoragePoolStatus.NORMAL, 'relocating': constants.StoragePoolStatus.NORMAL, 'quiesced': constants.StoragePoolStatus.NORMAL, 'quiescing': constants.StoragePoolStatus.NORMAL, 'unmounted': constants.StoragePoolStatus.OFFLINE, 'unmounting': constants.StoragePoolStatus.OFFLINE, 'destroying': constants.StoragePoolStatus.ABNORMAL, 'partial': constants.StoragePoolStatus.ABNORMAL, 'frozen': constants.StoragePoolStatus.ABNORMAL, 'reverted': constants.StoragePoolStatus.NORMAL, 'restricted': constants.StoragePoolStatus.NORMAL, 'inconsistent': constants.StoragePoolStatus.ABNORMAL, 'iron_restricted': constants.StoragePoolStatus.ABNORMAL, 'unknown': constants.StoragePoolStatus.ABNORMAL, 'offline': constants.StoragePoolStatus.OFFLINE, 'failed': constants.StoragePoolStatus.ABNORMAL, 'remote_cluster': constants.StoragePoolStatus.NORMAL, } VOLUME_STATUS = { 'online': constants.VolumeStatus.AVAILABLE, 'offline': constants.VolumeStatus.ERROR, 'nvfail': constants.VolumeStatus.ERROR, 'space-error': constants.VolumeStatus.ERROR, 'foreign-lun-error': constants.VolumeStatus.ERROR, } ALERT_SEVERITY = { 'Unknown': constants.Severity.NOT_SPECIFIED, 'Other': constants.Severity.NOT_SPECIFIED, 'Information': constants.Severity.INFORMATIONAL, 'Degraded': constants.Severity.WARNING, 'Minor': constants.Severity.MINOR, 'Major': constants.Severity.MAJOR, 'Critical': constants.Severity.CRITICAL, 'Fatal': constants.Severity.FATAL, } DISK_TYPE = { 'ATA': constants.DiskPhysicalType.ATA, 'BSAS': constants.DiskPhysicalType.SATA, 'FCAL': constants.DiskPhysicalType.FC, 'FSAS': constants.DiskPhysicalType.NL_SAS, 'LUN': constants.DiskPhysicalType.LUN, 'SAS': constants.DiskPhysicalType.SAS, 'MSATA': constants.DiskPhysicalType.SATA, 'SSD': constants.DiskPhysicalType.SSD, 'VMDISK': constants.DiskPhysicalType.VMDISK, 'unknown': constants.DiskPhysicalType.UNKNOWN, } DISK_LOGICAL = { 'aggregate': constants.DiskLogicalType.AGGREGATE, 'spare': constants.DiskLogicalType.SPARE, 'unknown': constants.DiskLogicalType.UNKNOWN, 'free': constants.DiskLogicalType.FREE, 'broken': constants.DiskLogicalType.BROKEN, 'foreign': constants.DiskLogicalType.FOREIGN, 'labelmaint': constants.DiskLogicalType.LABELMAINT, 'maintenance': constants.DiskLogicalType.MAINTENANCE, 'shared': constants.DiskLogicalType.SHARED, 'unassigned': constants.DiskLogicalType.UNASSIGNED, 'unsupported': constants.DiskLogicalType.UNSUPPORTED, 'remote': constants.DiskLogicalType.REMOTE, 'mediator': constants.DiskLogicalType.MEDIATOR, } FS_STATUS = { 'online': constants.FilesystemStatus.NORMAL, 'restricted': constants.FilesystemStatus.FAULTY, 'offline': constants.FilesystemStatus.NORMAL, 'force-online': constants.FilesystemStatus.FAULTY, 'force-offline': constants.FilesystemStatus.FAULTY, 
} NETWORK_LOGICAL_TYPE = { 'data': constants.PortLogicalType.DATA, 'cluster': constants.PortLogicalType.CLUSTER, 'node-mgmt': constants.PortLogicalType.NODE_MGMT, 'cluster-mgmt': constants.PortLogicalType.CLUSTER_MGMT, 'intercluster': constants.PortLogicalType.INTERCLUSTER, } ETH_LOGICAL_TYPE = { 'physical': constants.PortLogicalType.PHYSICAL, 'if-group': constants.PortLogicalType.IF_GROUP, 'vlan': constants.PortLogicalType.VLAN, 'undef': constants.PortLogicalType.OTHER } FC_TYPE = { 'fibre-channel': constants.PortType.FC, 'ethernet': constants.PortType.FCOE } WORM_TYPE = { 'non-snaplock': constants.WORMType.NON_WORM, 'compliance': constants.WORMType.COMPLIANCE, 'enterprise': constants.WORMType.ENTERPRISE, '-': constants.WORMType.NON_WORM } QUOTA_TYPE = { 'user': constants.QuotaType.USER, 'tree': constants.QuotaType.TREE, 'group': constants.QuotaType.GROUP } NETWORK_PORT_TYPE = { 'nfs': constants.PortType.NFS, 'cifs': constants.PortType.CIFS, 'iscsi': constants.PortType.ISCSI, 'fcp': constants.PortType.FC, 'fcache': constants.PortType.FCACHE, 'none': constants.PortType.OTHER, } SEVERITY_MAP = { 'AccessCache.ReachedLimits': 'EMERGENCY', 'LUN.inconsistent.filesystem': 'EMERGENCY', 'LUN.nvfail.vol.proc.failed': 'EMERGENCY', 'Nblade.DidNotInitialize': 'EMERGENCY', 'Nblade.cifsNoPrivShare': 'EMERGENCY', 'Nblade.nfsV4PoolExhaust': 'EMERGENCY', 'Nblade.vscanNoScannerConn': 'EMERGENCY', 'adt.dest.directory.full': 'EMERGENCY', 'adt.dest.directory.unavail': 'EMERGENCY', 'adt.dest.volume.offline': 'EMERGENCY', 'adt.service.block': 'EMERGENCY', 'adt.service.ro.filesystem': 'EMERGENCY', 'adt.stgvol.nospace': 'EMERGENCY', 'adt.stgvol.offline': 'EMERGENCY', 'api.engine.killed': 'EMERGENCY', 'app.log.emerg': 'EMERGENCY', 'arl.aggrOnlineFailed': 'EMERGENCY', 'bge.EepromCrc': 'EMERGENCY', 'boot.bootmenu.issue': 'EMERGENCY', 'boot.varfs.backup.issue': 'EMERGENCY', 'bootfs.env.issue': 'EMERGENCY', 'callhome.battery.failure': 'EMERGENCY', 'callhome.ch.ps.fan.bad.xmin': 'EMERGENCY', 'callhome.chassis.overtemp': 'EMERGENCY', 'callhome.chassis.undertemp': 'EMERGENCY', 'callhome.clam.node.ooq': 'EMERGENCY', 'callhome.client.app.emerg': 'EMERGENCY', 'callhome.fans.failed': 'EMERGENCY', 'callhome.hba.failed': 'EMERGENCY', 'callhome.ibretimerprog.fail': 'EMERGENCY', 'callhome.mcc.auso.trig.fail': 'EMERGENCY', 'callhome.mcc.switchback.failed': 'EMERGENCY', 'callhome.mcc.switchover.failed': 'EMERGENCY', 'callhome.mdb.recovery.unsuccessful': 'EMERGENCY', 'callhome.netinet.dup.clustIP': 'EMERGENCY', 'callhome.nvram.failure': 'EMERGENCY', 'callhome.partner.down': 'EMERGENCY', 'callhome.ps.removed': 'EMERGENCY', 'callhome.raid.no.recover': 'EMERGENCY', 'callhome.raidtree.assim': 'EMERGENCY', 'callhome.rlm.replace': 'EMERGENCY', 'callhome.rlm.replace.lan': 'EMERGENCY', 'callhome.root.vol.recovery.reqd': 'EMERGENCY', 'callhome.sblade.lu.resync.to': 'EMERGENCY', 'callhome.sblade.lu.rst.hung': 'EMERGENCY', 'callhome.sblade.prop.fail': 'EMERGENCY', 'callhome.sfo.takeover.panic': 'EMERGENCY', 'callhome.shlf.fan': 'EMERGENCY', 'callhome.vol.space.crit': 'EMERGENCY', 'cf.fm.panicInToMode': 'EMERGENCY', 'cf.fm.reserveDisksOff': 'EMERGENCY', 'cf.fsm.autoGivebackAttemptsExceeded': 'EMERGENCY', 'cf.takeover.missing.ptnrDiskInventory': 'EMERGENCY', 'cf.takeover.missing.ptnrDisks': 'EMERGENCY', 'cft.trans.commit.failed': 'EMERGENCY', 'clam.node.ooq': 'EMERGENCY', 'config.localswitch': 'EMERGENCY', 'config.noBconnect': 'EMERGENCY', 'config.noPartnerLUNs': 'EMERGENCY', 'coredump.dump.failed': 'EMERGENCY', 'ctran.group.reset.failed': 
'EMERGENCY', 'ctran.jpc.multiple.nodes': 'EMERGENCY', 'ctran.jpc.split.brain': 'EMERGENCY', 'ctran.jpc.valid.failed': 'EMERGENCY', 'disk.dynamicqual.failure.shutdown': 'EMERGENCY', 'ds.sas.xfer.unknown.error': 'EMERGENCY', 'ems.eut.prilo0_log_emerg': 'EMERGENCY', 'ems.eut.privar0_log_emerg_var': 'EMERGENCY', 'fci.adapter.firmware.update.failed': 'EMERGENCY', 'ha.takeoverImpHotShelf': 'EMERGENCY', 'haosc.invalid.config': 'EMERGENCY', 'license.capac.eval.shutdown': 'EMERGENCY', 'license.capac.shutdown': 'EMERGENCY', 'license.capac.unl.shutdown': 'EMERGENCY', 'license.subscription.enforcement': 'EMERGENCY', 'lmgr.aggr.CA.locks.dropped': 'EMERGENCY', 'lun.metafile.OOVC.corrupt': 'EMERGENCY', 'lun.metafile.VTOC.corrupt': 'EMERGENCY', 'mcc.auso.trigFailed': 'EMERGENCY', 'mcc.auso.triggerFailed': 'EMERGENCY', 'mgmtgwd.rootvol.recovery.changed': 'EMERGENCY', 'mgmtgwd.rootvol.recovery.different': 'EMERGENCY', 'mgmtgwd.rootvol.recovery.low.space': 'EMERGENCY', 'mgmtgwd.rootvol.recovery.new': 'EMERGENCY', 'mgmtgwd.rootvol.recovery.takeover.changed': 'EMERGENCY', 'mgr.boot.floppy_media': 'EMERGENCY', 'mgr.boot.reason_abnormal': 'EMERGENCY', 'mlm.array.portMixedAddress': 'EMERGENCY', 'monitor.chassisFanFail.xMinShutdown': 'EMERGENCY', 'monitor.fan.critical': 'EMERGENCY', 'monitor.globalStatus.critical': 'EMERGENCY', 'monitor.globalStatus.nonRecoverable': 'EMERGENCY', 'monitor.ioexpansionTemperature.cool': 'EMERGENCY', 'monitor.mismatch.shutdown': 'EMERGENCY', 'monitor.nvramLowBatteries': 'EMERGENCY', 'monitor.power.degraded': 'EMERGENCY', 'monitor.shelf.accessError': 'EMERGENCY', 'monitor.shutdown.brokenDisk': 'EMERGENCY', 'monitor.shutdown.chassisOverTemp': 'EMERGENCY', 'monitor.shutdown.emergency': 'EMERGENCY', 'monitor.shutdown.ioexpansionOverTemp': 'EMERGENCY', 'monitor.shutdown.ioexpansionUnderTemp': 'EMERGENCY', 'monitor.shutdown.nvramLowBatteries': 'EMERGENCY', 'monitor.shutdown.nvramLowBattery': 'EMERGENCY', 'netif.badEeprom': 'EMERGENCY', 'netif.overTempError': 'EMERGENCY', 'netif.uncorEccError': 'EMERGENCY', 'netinet.ethr.dup.clustIP': 'EMERGENCY', 'nodewatchdog.node.failure': 'EMERGENCY', 'nodewatchdog.node.longreboot': 'EMERGENCY', 'nodewatchdog.node.panic': 'EMERGENCY', 'nonha.resvConflictHalt': 'EMERGENCY', 'nv.fio.write.err': 'EMERGENCY', 'nv.none': 'EMERGENCY', 'nv2flash.copy2NVMEM.failure': 'EMERGENCY', 'nv2flash.copy2flash.failure': 'EMERGENCY', 'nv2flash.hw.failure': 'EMERGENCY', 'nv2flash.initfail': 'EMERGENCY', 'nvmem.battery.capLowCrit': 'EMERGENCY', 'nvmem.battery.capacity.low': 'EMERGENCY', 'nvmem.battery.current.high': 'EMERGENCY', 'nvmem.battery.currentHigh': 'EMERGENCY', 'nvmem.battery.currentLow': 'EMERGENCY', 'nvmem.battery.discFET.off': 'EMERGENCY', 'nvmem.battery.fccLowCrit': 'EMERGENCY', 'nvmem.battery.packInvalid': 'EMERGENCY', 'nvmem.battery.powerFault': 'EMERGENCY', 'nvmem.battery.temp.high': 'EMERGENCY', 'nvmem.battery.tempHigh': 'EMERGENCY', 'nvmem.battery.unread': 'EMERGENCY', 'nvmem.battery.voltage.high': 'EMERGENCY', 'nvmem.battery.voltageHigh': 'EMERGENCY', 'nvmem.battery.voltageLow': 'EMERGENCY', 'nvmem.voltage.high': 'EMERGENCY', 'nvram.battery.capacity.low.critical': 'EMERGENCY', 'nvram.battery.charging.nocharge': 'EMERGENCY', 'nvram.battery.current.high': 'EMERGENCY', 'nvram.battery.current.low': 'EMERGENCY', 'nvram.battery.dischargeFET.off': 'EMERGENCY', 'nvram.battery.fault': 'EMERGENCY', 'nvram.battery.fcc.low.critical': 'EMERGENCY', 'nvram.battery.not.present': 'EMERGENCY', 'nvram.battery.power.fault': 'EMERGENCY', 'nvram.battery.sensor.unreadable': 
'EMERGENCY', 'nvram.battery.temp.high': 'EMERGENCY', 'nvram.battery.voltage.high': 'EMERGENCY', 'nvram.battery.voltage.low': 'EMERGENCY', 'nvram.decryptionKey.unavail': 'EMERGENCY', 'nvram.encryptionKey.initfail': 'EMERGENCY', 'nvram.hw.initFail': 'EMERGENCY', 'platform.insufficientMemory': 'EMERGENCY', 'pvif.allLinksDown': 'EMERGENCY', 'pvif.initMemFail': 'EMERGENCY', 'pvif.initMesgFail': 'EMERGENCY', 'raid.assim.disk.nolabels': 'EMERGENCY', 'raid.assim.fatal': 'EMERGENCY', 'raid.assim.fatal.upgrade': 'EMERGENCY', 'raid.assim.rg.missingChild': 'EMERGENCY', 'raid.assim.tree.degradedDirty': 'EMERGENCY', 'raid.assim.tree.multipleRootVols': 'EMERGENCY', 'raid.assim.upgrade.aggr.fail': 'EMERGENCY', 'raid.config.online.req.unsup': 'EMERGENCY', 'raid.disk.owner.change.fail': 'EMERGENCY', 'raid.mirror.bigio.restrict.failed': 'EMERGENCY', 'raid.mirror.bigio.wafliron.nostart': 'EMERGENCY', 'raid.multierr.unverified.block': 'EMERGENCY', 'raid.mv.defVol.online.fail': 'EMERGENCY', 'raid.rg.readerr.bad.file.block': 'EMERGENCY', 'raid.rg.readerr.wc.blkErr': 'EMERGENCY', 'raid.vol.volinfo.mismatch': 'EMERGENCY', 'rdb.recovery.failed': 'EMERGENCY', 'repl.checker.block.missing': 'EMERGENCY', 'repl.physdiff.invalid.hole': 'EMERGENCY', 'sas.adapter.firmware.update.failed': 'EMERGENCY', 'sas.cable.unqualified': 'EMERGENCY', 'sas.cpr.failed': 'EMERGENCY', 'sas.cpr.recoveryThreshold': 'EMERGENCY', 'scsiblade.kernel.volume.limbo.group': 'EMERGENCY', 'scsiblade.kernel.vserver.limbo.group': 'EMERGENCY', 'scsiblade.mgmt.wedged': 'EMERGENCY', 'scsiblade.prop.done.error': 'EMERGENCY', 'scsiblade.unavailable': 'EMERGENCY', 'scsiblade.vol.init.failed': 'EMERGENCY', 'scsiblade.volume.event.lost': 'EMERGENCY', 'scsiblade.vs.purge.fail': 'EMERGENCY', 'scsiblade.vserver.op.timeout': 'EMERGENCY', 'scsitarget.fct.postFailed': 'EMERGENCY', 'scsitarget.slifct.rebootRequired': 'EMERGENCY', 'secd.ldap.noServers': 'EMERGENCY', 'secd.lsa.noServers': 'EMERGENCY', 'secd.netlogon.noServers': 'EMERGENCY', 'secd.nis.noServers': 'EMERGENCY', 'ses.badShareStorageConfigErr': 'EMERGENCY', 'ses.config.IllegalEsh270': 'EMERGENCY', 'ses.config.shelfMixError': 'EMERGENCY', 'ses.psu.powerReqError': 'EMERGENCY', 'ses.shelf.em.ctrlFailErr': 'EMERGENCY', 'ses.status.enclError': 'EMERGENCY', 'ses.status.fanError': 'EMERGENCY', 'ses.status.volError': 'EMERGENCY', 'ses.system.em.mmErr': 'EMERGENCY', 'ses.unsupported.shelf.psu': 'EMERGENCY', 'ses.unsupported.shelves.psus': 'EMERGENCY', 'sfo.reassignFailed': 'EMERGENCY', 'snapmirror.replay.failed': 'EMERGENCY', 'sp.ipmi.lost.shutdown': 'EMERGENCY', 'spm.mgwd.process.exit': 'EMERGENCY', 'spm.secd.process.exit': 'EMERGENCY', 'spm.vifmgr.process.exit': 'EMERGENCY', 'spm.vldb.process.exit': 'EMERGENCY', 'ups.battery.critical.goodlinepower': 'EMERGENCY', 'ups.battery.warning': 'EMERGENCY', 'ups.battery.warning.goodlinepower': 'EMERGENCY', 'ups.inputpower.failed': 'EMERGENCY', 'ups.systemshutdown': 'EMERGENCY', 'vifmgr.clus.linkdown': 'EMERGENCY', 'vifmgr.cluscheck.l2ping': 'EMERGENCY', 'vifmgr.ipspace.tooMany': 'EMERGENCY', 'vldb.update.duringsofail': 'EMERGENCY', 'vol.phys.overalloc': 'EMERGENCY', 'vsa.inadequateVM': 'EMERGENCY', 'vsa.unlicensed': 'EMERGENCY', 'wafl.aggr.rsv.low.nomount': 'EMERGENCY', 'wafl.aggrtrans.outofspace.offline': 'EMERGENCY', 'wafl.bad.aggr.buftree.type': 'EMERGENCY', 'wafl.bad.vol.buftree.type': 'EMERGENCY', 'wafl.buf.badHeader': 'EMERGENCY', 'wafl.buf.freeingFreeBlock': 'EMERGENCY', 'wafl.failed.mount': 'EMERGENCY', 'wafl.failed.mount.bad.fsid': 'EMERGENCY', 
'wafl.inconsistent.dirent': 'EMERGENCY',
    'wafl.inconsistent.threshold.reached': 'EMERGENCY',
    'wafl.iron.abort.offlineFail': 'EMERGENCY',
    'wafl.iron.badfsid': 'EMERGENCY',
    'wafl.iron.oc.abort.bad_blk': 'EMERGENCY',
    'wafl.iron.oc.abort.clog_full': 'EMERGENCY',
    'wafl.iron.oc.deletedChangeLog': 'EMERGENCY',
    'wafl.iron.oc.errorCommitLog': 'EMERGENCY',
    'wafl.iron.oc.root.lowMemory': 'EMERGENCY',
    'wafl.mcc.so.nvram.warn': 'EMERGENCY',
    'wafl.nvlog.checkFail': 'EMERGENCY',
    'wafl.nvsave.replaying.fail': 'EMERGENCY',
    'wafl.nvsave.saving.fail': 'EMERGENCY',
    'wafl.offline.versionMismatch': 'EMERGENCY',
    'wafl.online.fail.vmalign': 'EMERGENCY',
    'wafl.online.notCompatibleVer': 'EMERGENCY',
    'wafl.online.vbnMismatch': 'EMERGENCY',
    'wafl.raid.incons.xidata': 'EMERGENCY',
    'wafl.scan.typebits.diffFail': 'EMERGENCY',
    'wafl.takeover.root.fail': 'EMERGENCY',
    'wafl.takeover.vol.fail': 'EMERGENCY',
    'wafl.vol.nvfail.offline': 'EMERGENCY',
    'wafl.vol.walloc.rsv.failmount': 'EMERGENCY'}
IOPS_DESCRIPTION = {
    "unit": "IOPS",
    "description": "Input/output operations per second"
}
READ_IOPS_DESCRIPTION = {
    "unit": "IOPS",
    "description": "Read input/output operations per second"
}
WRITE_IOPS_DESCRIPTION = {
    "unit": "IOPS",
    "description": "Write input/output operations per second"
}
THROUGHPUT_DESCRIPTION = {
    "unit": "MB/s",
    "description": "Represents how much data is "
                   "successfully transferred in MB/s"
}
READ_THROUGHPUT_DESCRIPTION = {
    "unit": "MB/s",
    "description": "Represents how much data read is "
                   "successfully transferred in MB/s"
}
WRITE_THROUGHPUT_DESCRIPTION = {
    "unit": "MB/s",
    "description": "Represents how much data write is "
                   "successfully transferred in MB/s"
}
RESPONSE_TIME_DESCRIPTION = {
    "unit": "ms",
    "description": "Average time taken for an IO "
                   "operation in ms"
}
CACHE_HIT_RATIO_DESCRIPTION = {
    "unit": "%",
    "description": "Percentage of IO requests that are cache hits"
}
READ_CACHE_HIT_RATIO_DESCRIPTION = {
    "unit": "%",
    "description": "Percentage of read ops that are cache hits"
}
WRITE_CACHE_HIT_RATIO_DESCRIPTION = {
    "unit": "%",
    "description": "Percentage of write ops that are cache hits"
}
IO_SIZE_DESCRIPTION = {
    "unit": "KB",
    "description": "The average size of IO requests in KB"
}
READ_IO_SIZE_DESCRIPTION = {
    "unit": "KB",
    "description": "The average size of read IO requests in KB"
}
WRITE_IO_SIZE_DESCRIPTION = {
    "unit": "KB",
    "description": "The average size of write IO requests in KB"
}
CPU_USAGE_DESCRIPTION = {
    "unit": "%",
    "description": "Percentage of CPU usage"
}
MEMORY_USAGE_DESCRIPTION = {
    "unit": "%",
    "description": "Percentage of memory usage"
}
SERVICE_TIME = {
    "unit": 'ms',
    "description": "Service time of the resource in ms"
}
CAP_MAP = {
    "iops": IOPS_DESCRIPTION,
    "readIops": READ_IOPS_DESCRIPTION,
    "writeIops": WRITE_IOPS_DESCRIPTION,
    "throughput": THROUGHPUT_DESCRIPTION,
    "readThroughput": READ_THROUGHPUT_DESCRIPTION,
    "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
    "responseTime": RESPONSE_TIME_DESCRIPTION,
    "cacheHitRatio": CACHE_HIT_RATIO_DESCRIPTION,
    "readCacheHitRatio": READ_CACHE_HIT_RATIO_DESCRIPTION,
    "writeCacheHitRatio": WRITE_CACHE_HIT_RATIO_DESCRIPTION,
    "ioSize": IO_SIZE_DESCRIPTION,
    "readIoSize": READ_IO_SIZE_DESCRIPTION,
    "writeIoSize": WRITE_IO_SIZE_DESCRIPTION,
}
STORAGE_CAPABILITIES = {
    "throughput": THROUGHPUT_DESCRIPTION,
    "responseTime": RESPONSE_TIME_DESCRIPTION,
    "iops": IOPS_DESCRIPTION,
    "readThroughput": READ_THROUGHPUT_DESCRIPTION,
    "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION,
    "readIops": READ_IOPS_DESCRIPTION,
    "writeIops": WRITE_IOPS_DESCRIPTION,
}
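# Illustrative lookups (not part of the original module): CAP_MAP is keyed
# by the camelCase metric names used by the performance handler, and each
# value is a unit/description pair, e.g.
#     CAP_MAP['responseTime']['unit']   # 'ms'
#     CAP_MAP['iops']['description']    # 'Input/output operations per second'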
POOL_CAPABILITIES = { "throughput": THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, "iops": IOPS_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, } VOLUME_CAPABILITIES = { "throughput": THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, "iops": IOPS_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, } PORT_CAPABILITIES = { "throughput": THROUGHPUT_DESCRIPTION, "responseTime": RESPONSE_TIME_DESCRIPTION, "iops": IOPS_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, } FS_CAPABILITIES = { "throughput": THROUGHPUT_DESCRIPTION, "iops": IOPS_DESCRIPTION, "readThroughput": READ_THROUGHPUT_DESCRIPTION, "writeThroughput": WRITE_THROUGHPUT_DESCRIPTION, "readIops": READ_IOPS_DESCRIPTION, "writeIops": WRITE_IOPS_DESCRIPTION, } HOST_OS_TYPE_MAP = { 'solaris': constants.HostOSTypes.SOLARIS, 'windows': constants.HostOSTypes.WINDOWS, 'hpux': constants.HostOSTypes.HP_UX, 'aix': constants.HostOSTypes.AIX, 'linux': constants.HostOSTypes.LINUX, 'netware': constants.HostOSTypes.UNKNOWN, 'vmware': constants.HostOSTypes.VMWARE_ESX, 'openvms': constants.HostOSTypes.OPEN_VMS, 'xen': constants.HostOSTypes.XEN_SERVER, 'hyper_v': constants.HostOSTypes.UNKNOWN } ================================================ FILE: delfin/drivers/netapp/dataontap/mapping_handler.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
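# NOTE: MappingHandler converts the key/value maps parsed from ONTAP CLI
# output by Tools.split_value_map_list into delfin resource dicts
# (initiators, hosts, port groups and masking views). An 'IgroupName' of
# '-' denotes an initiator that is not bound to any igroup; such
# initiators are emitted with 'native_storage_host_id' set to None.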
from delfin.common import constants from delfin.drivers.netapp.dataontap import constants as constant from delfin.drivers.utils.tools import Tools class MappingHandler(object): @staticmethod def format_initiators(initiator_list, initiator_info, storage_id, protocol_type, is_default=False): initiator_map_list = [] Tools.split_value_map_list( initiator_info, initiator_map_list, is_mapping=True, split=':') if not is_default and protocol_type ==\ constants.InitiatorType.FC: MappingHandler.get_fc_initiator( initiator_list, initiator_map_list, storage_id) elif not is_default and protocol_type ==\ constants.InitiatorType.ISCSI: MappingHandler.get_iscsi_initiator( initiator_list, initiator_map_list, storage_id) if is_default: MappingHandler.get_initiator_from_host( initiator_list, initiator_map_list, storage_id) return initiator_list @staticmethod def duplicate_removal(initiator_list, initiator_model): is_same = False for initiator in initiator_list: if initiator['native_storage_host_initiator_id'] \ == initiator_model['native_storage_host_initiator_id']: is_same = True break if not is_same: initiator_list.append(initiator_model) @staticmethod def get_iscsi_initiator(initiator_list, initiator_map_list, storage_id): for initiator_map in initiator_map_list: if 'IgroupName' in initiator_map \ and initiator_map.get('IgroupName') == '-': initiator_id = \ initiator_map.get('InitiatorName').replace(' ', '') initiator_model = { 'native_storage_host_initiator_id': initiator_id, 'native_storage_host_id': None, 'name': initiator_id, 'alias': initiator_map.get('InitiatorAlias'), 'type': constants.InitiatorType.ISCSI, 'status': constants.InitiatorStatus.ONLINE, 'wwn': initiator_id, 'storage_id': storage_id, } MappingHandler.duplicate_removal( initiator_list, initiator_model) @staticmethod def get_fc_initiator(initiator_list, initiator_map_list, storage_id): for initiator_map in initiator_map_list: if 'IgroupName' in initiator_map \ and initiator_map.get('IgroupName') == '-': initiator_id = \ initiator_map.get('InitiatorWWPN').replace(' ', '') initiator_model = { 'native_storage_host_initiator_id': initiator_id, 'native_storage_host_id': None, 'name': initiator_id, 'alias': initiator_map.get('InitiatorWWPNAlias'), 'type': constants.InitiatorType.FC, 'status': constants.InitiatorStatus.ONLINE, 'wwn': initiator_map.get('InitiatorWWPN'), 'storage_id': storage_id, } MappingHandler.duplicate_removal( initiator_list, initiator_model) @staticmethod def get_initiator_type(protocol_type, initiator_name): if protocol_type != 'mixed': return constant.NETWORK_PORT_TYPE.get(protocol_type) else: if constant.IQN_PATTERN.search(initiator_name): return constants.PortType.ISCSI elif constant.WWN_PATTERN.search(initiator_name): return constants.PortType.FC return None @staticmethod def format_initiator(data_map, initiator_id, storage_id): initiator_id = initiator_id.split('(')[0] protocol_type = \ MappingHandler.get_initiator_type( data_map.get('Protocol'), initiator_id) host_id = '%s_%s' % (data_map.get('VserverName'), data_map.get('IgroupName')) initiator_model = { 'native_storage_host_initiator_id': initiator_id, 'native_storage_host_id': host_id, 'name': initiator_id, 'type': protocol_type, 'status': constants.InitiatorStatus.ONLINE, 'storage_id': storage_id, 'wwn': initiator_id } return initiator_model @staticmethod def get_initiator_from_host( initiator_list, initiator_map_list, storage_id): for initiator_map in initiator_map_list: if 'IgroupName' in initiator_map: initiator_id = \ initiator_map.get('Initiators').replace(' 
', '') if initiator_map.get('Initiators') != '-': initiator_list.append( MappingHandler.format_initiator( initiator_map, initiator_id, storage_id)) for key in initiator_map: if constant.INITIATOR_KEY in key and key != '-': initiator_list.append( MappingHandler.format_initiator( initiator_map, key, storage_id)) @staticmethod def format_host(initiator_info, storage_id): initiator_map_list, initiator_list = [], [] Tools.split_value_map_list(initiator_info, initiator_map_list, split=':') for initiator_map in initiator_map_list: if 'IgroupName' in initiator_map: host_id = '%s_%s' % (initiator_map.get('VserverName'), initiator_map.get('IgroupName')) initiator_model = { 'native_storage_host_id': host_id, 'name': initiator_map.get('IgroupName'), 'os_type': constant.HOST_OS_TYPE_MAP.get( initiator_map.get('OSType')), 'status': constants.HostStatus.NORMAL, 'storage_id': storage_id, } initiator_list.append(initiator_model) return initiator_list @staticmethod def format_port_group(port_set_info, lif_info, storage_id): port_map_list, port_group_list = [], [] lif_map_list, port_group_relation_list = [], [] Tools.split_value_map_list(port_set_info, port_map_list, split=':') Tools.split_value_map_list(lif_info, lif_map_list, split=':') for port_map in port_map_list: if 'PortsetName' in port_map: port_group_id = "%s-%s-%s" % \ (port_map.get('VserverName'), port_map.get('PortsetName'), port_map.get('Protocol')) ports = \ port_map.get('LIFOrTPGName').replace(' ', '').split(',') ports_str = '' for lif_map in lif_map_list: if 'LogicalInterfaceName' in lif_map: if lif_map.get('LogicalInterfaceName') in ports: port_id = "%s_%s" % \ (lif_map['CurrentNode'], lif_map['CurrentPort']) port_group_relation = { 'storage_id': storage_id, 'native_port_group_id': port_group_id, 'native_port_id': port_id } port_group_relation_list.append( port_group_relation) if ports_str: ports_str = \ "{0},{1}".format(ports_str, port_id) else: ports_str = "{0}".format(port_id) port_group_model = { 'native_port_group_id': port_group_id, 'name': port_map.get('PortsetName'), 'ports': ports_str, 'storage_id': storage_id, } port_group_list.append(port_group_model) result = { 'port_groups': port_group_list, 'port_grp_port_rels': port_group_relation_list } return result @staticmethod def format_mapping_view(mapping_info, volume_info, storage_id, host_list): mapping_map_list, mapping_view_list, volume_map_list = [], [], [] Tools.split_value_map_list(mapping_info, mapping_map_list, split=":") Tools.split_value_map_list(volume_info, volume_map_list, split=":") for mapping_map in mapping_map_list: if 'LUNPath' in mapping_map: host_id = '%s_%s' % (mapping_map.get('VserverName'), mapping_map.get('IgroupName')) native_masking_view_id = \ '%s_%s_%s_%s' % (mapping_map.get('LUNNode'), mapping_map.get('VserverName'), mapping_map.get('IgroupName'), mapping_map.get('LUNName')) name = '%s_%s' % (mapping_map.get('IgroupName'), mapping_map.get('LUNName')) port_group_id = "%s-%s-%s" % \ (mapping_map.get('VserverName'), mapping_map.get('PortsetBindingIgroup'), mapping_map.get('IgroupProtocolType')) native_volume_id = None for volume_map in volume_map_list: if 'LUNName' in volume_map: if volume_map.get('LUNName') == \ mapping_map.get('LUNName') \ and volume_map.get('VserverName') == \ mapping_map.get('VserverName') \ and volume_map.get('LUNPath') == \ mapping_map.get('LUNPath'): native_volume_id = volume_map['SerialNumber'] mapping_view = { 'native_masking_view_id': native_masking_view_id, 'name': name, 'native_port_group_id': port_group_id, 
'native_storage_host_id': host_id,
                    'native_volume_id': native_volume_id,
                    'storage_id': storage_id,
                }
                mapping_view_list.append(mapping_view)
        return mapping_view_list


================================================
FILE: delfin/drivers/netapp/dataontap/netapp_handler.py
================================================
# Copyright 2021 The SODA Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re
import time
import requests
import six
import hashlib

from oslo_log import log as logging
from oslo_utils import units

from delfin import cryptor
from delfin.drivers.netapp.dataontap import constants as constant
from delfin import exception, utils
from delfin.common import constants
from delfin.drivers.netapp.dataontap.mapping_handler import MappingHandler
from delfin.drivers.netapp.dataontap.performance_handler \
    import PerformanceHandler
from delfin.drivers.utils.rest_client import RestClient
from delfin.drivers.utils.ssh_client import SSHPool
from delfin.drivers.utils.tools import Tools

LOG = logging.getLogger(__name__)


class NetAppHandler(object):
    OID_SERIAL_NUM = '1.3.6.1.4.1.789.1.1.9.0'
    OID_TRAP_DATA = '1.3.6.1.4.1.789.1.1.12.0'
    NODE_NAME = 'controller_name'
    SECONDS_TO_MS = 1000
    NETAPP_NAA = '60a98000'

    def __init__(self, **kwargs):
        self.ssh_pool = SSHPool(**kwargs)
        self.rest_client = RestClient(**kwargs)
        self.rest_client.verify = kwargs.get('verify', False)
        self.rest_client.init_http_head()
        self.rest_client.session.auth = requests.auth.HTTPBasicAuth(
            self.rest_client.rest_username,
            cryptor.decode(self.rest_client.rest_password))

    @staticmethod
    def get_table_data(values):
        header_index = 0
        table = values.split("\r\n")
        for i in range(0, len(table)):
            if constant.PATTERN.search(table[i]):
                header_index = i
        return table[(header_index + 1):]

    @staticmethod
    def get_fs_id(vserver, volume):
        return vserver + '_' + volume

    @staticmethod
    def get_qt_id(vserver, volume, qtree):
        qt_id = vserver + '/' + volume
        if qtree != '':
            qt_id += '/' + qtree
        return qt_id

    @staticmethod
    def get_size(limit, is_calculate=False):
        if limit == '0B':
            return 0
        if limit == '-':
            return 0 if is_calculate else '-'
        return int(Tools.get_capacity_size(limit))

    @staticmethod
    def parse_alert(alert):
        try:
            alert_info = alert.get(NetAppHandler.OID_TRAP_DATA)
            node_name = alert.get(NetAppHandler.NODE_NAME)
            alert_info = alert_info.replace("]", '')
            alert_array = alert_info.split("[")
            alert_model = {}
            alert_map = {}
            if len(alert_array) > 1:
                category = constants.Category.FAULT \
                    if 'created' in alert_array[0] \
                    else constants.Category.RECOVERY
                alert_values = alert_array[1].split(",")
                for alert_value in alert_values:
                    array = alert_value.split("=")
                    if len(array) > 1:
                        key = array[0].replace(' ', '')
                        value = array[1].replace(' ', '').replace('.', '')
                        alert_map[key] = value
                if alert_map and category == constants.Category.RECOVERY:
                    alert_model = {
                        'alert_id': alert_map.get('AlertId'),
                        'alert_name': alert_map.get('AlertId'),
                        'severity': None,
                        'category': category,
                        'type': constants.EventType.EQUIPMENT_ALARM,
                        'occur_time':
utils.utcnow_ms(), 'description': None, 'match_key': hashlib.md5( (alert_map.get('AlertId') + node_name + alert_map['AlertingResource'] ).encode()).hexdigest(), 'resource_type': constants.DEFAULT_RESOURCE_TYPE, 'location': None } else: raise exception.IncompleteTrapInformation( constant.STORAGE_VENDOR) return alert_model except exception.IncompleteTrapInformation as err: raise err except Exception as err: err_msg = "Failed to parse alert from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def login(self): try: result = self.ssh_pool.do_exec('cluster identity show') if 'is not a recognized command' in result \ or 'command not found' in result: raise exception.InvalidIpOrPort() version = self.get_storage_version() if version >= 9.6: self.rest_client.do_call( constant.CLUSTER_PERF_URL, None, 'GET') except Exception as e: LOG.error("Failed to login netapp %s" % (six.text_type(e))) raise e def get_storage(self): try: raw_capacity = total_capacity = used_capacity = free_capacity = 0 controller_map_list = [] system_info = self.ssh_pool.do_exec( constant.CLUSTER_SHOW_COMMAND) version_info = self.ssh_pool.do_exec( constant.VERSION_SHOW_COMMAND) status_info = self.ssh_pool.do_exec( constant.STORAGE_STATUS_COMMAND) controller_info = self.ssh_pool.do_exec( constant.CONTROLLER_SHOW_DETAIL_COMMAND) Tools.split_value_map_list( controller_info, controller_map_list, ":") version_array = version_info.split("\r\n") storage_version = '' for version in version_array: if 'NetApp' in version: storage_version = version.split(":") break status = self.get_table_data(status_info) status = constant.STORAGE_STATUS.get(status[0].split()[0]) disk_list = self.get_disks(None) pool_list = self.list_storage_pools(None) storage_map_list = [] Tools.split_value_map_list( system_info, storage_map_list, split=':') if len(storage_map_list) > 0: storage_map = storage_map_list[-1] controller = None for controller_map in controller_map_list[1:]: if controller_map['Model'] != '-': controller = controller_map continue controller = controller_map_list[1] for disk in disk_list: raw_capacity += disk['capacity'] for pool in pool_list: total_capacity += pool['total_capacity'] free_capacity += pool['free_capacity'] used_capacity += pool['used_capacity'] storage_model = { "name": storage_map['ClusterName'], "vendor": constant.STORAGE_VENDOR, "model": controller['Model'], "status": status, "serial_number": storage_map['ClusterUUID'] + ':' + storage_map['ClusterSerialNumber'], "firmware_version": storage_version[0], "location": controller['Location'], "total_capacity": total_capacity, "raw_capacity": raw_capacity, "used_capacity": used_capacity, "free_capacity": free_capacity } return storage_model except exception.DelfinException as e: err_msg = "Failed to get storage from " \ "netapp cmode: %s" % (six.text_type(e.msg)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def get_aggregate(self, storage_id): agg_list = [] agg_info = self.ssh_pool.do_exec( constant.AGGREGATE_SHOW_DETAIL_COMMAND) agg_map_list = [] Tools.split_value_map_list(agg_info, agg_map_list, split=':') for agg_map in agg_map_list: if agg_map and 'Aggregate' in agg_map.keys(): status = constant.AGGREGATE_STATUS.get(agg_map['State']) pool_model = { 'name': agg_map['Aggregate'], 'storage_id': storage_id, 'native_storage_pool_id': agg_map['UUIDString'], 'description': None, 
'status': status, 'storage_type': constants.StorageType.UNIFIED, 'total_capacity': self.get_size(agg_map['Size'], True), 'used_capacity': self.get_size(agg_map['UsedSize'], True), 'free_capacity': self.get_size(agg_map['AvailableSize'], True), } agg_list.append(pool_model) return agg_list def get_pool(self, storage_id): pool_list = [] pool_info = self.ssh_pool.do_exec( constant.POOLS_SHOW_DETAIL_COMMAND) pool_map_list = [] Tools.split_value_map_list(pool_info, pool_map_list, split=':') for pool_map in pool_map_list: if pool_map and 'StoragePoolName' in pool_map.keys(): status = constants.StoragePoolStatus.ABNORMAL if pool_map['IsPoolHealthy?'] == 'true': status = constants.StoragePoolStatus.NORMAL pool_model = { 'name': pool_map['StoragePoolName'], 'storage_id': storage_id, 'native_storage_pool_id': pool_map['UUIDofStoragePool'], 'description': None, 'status': status, 'storage_type': constants.StorageType.UNIFIED, 'total_capacity': self.get_size(pool_map['StoragePoolTotalSize'], True), 'used_capacity': self.get_size(pool_map['StoragePoolTotalSize'], True) - self.get_size(pool_map['StoragePoolUsableSize'], True), 'free_capacity': self.get_size(pool_map['StoragePoolUsableSize'], True) } pool_list.append(pool_model) return pool_list def list_storage_pools(self, storage_id): try: pool_list = self.get_pool(storage_id) agg_list = self.get_aggregate(storage_id) return agg_list + pool_list except exception.DelfinException as e: err_msg = "Failed to get storage pool from " \ "netapp cmode: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage pool from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def list_volumes(self, storage_id): try: volume_list = [] volume_info = self.ssh_pool.do_exec( constant.LUN_SHOW_DETAIL_COMMAND) fs_list = self.get_filesystems(storage_id) volume_map_list = [] Tools.split_value_map_list(volume_info, volume_map_list, split=':') for volume_map in volume_map_list: if volume_map and 'LUNName' in volume_map.keys(): pool_id = None status = 'normal' if volume_map['State'] == 'online' \ else 'offline' for fs in fs_list: if fs['name'] == volume_map['VolumeName']: pool_id = fs['native_pool_id'] type = constants.VolumeType.THIN \ if volume_map['SpaceAllocation'] == 'enabled' \ else constants.VolumeType.THICK volume_model = { 'name': volume_map['LUNName'], 'storage_id': storage_id, 'description': None, 'status': status, 'native_volume_id': volume_map['SerialNumber'], 'native_storage_pool_id': pool_id, 'wwn': NetAppHandler.NETAPP_NAA + volume_map['SerialNumber(Hex)'], 'compressed': None, 'deduplicated': None, 'type': type, 'total_capacity': self.get_size(volume_map['LUNSize'], True), 'used_capacity': self.get_size(volume_map['UsedSize'], True), 'free_capacity': self.get_size(volume_map['LUNSize'], True) - self.get_size(volume_map['UsedSize'], True) } volume_list.append(volume_model) return volume_list except exception.DelfinException as e: err_msg = "Failed to get storage volume from " \ "netapp cmode: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage volume from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def get_alerts(self, query_para): alert_list = [] alert_info = self.ssh_pool.do_exec( constant.ALTER_SHOW_DETAIL_COMMAND) alert_map_list = [] Tools.split_value_map_list( alert_info, alert_map_list, True, split=':') for alert_map in alert_map_list: if 
alert_map and 'AlertID' in alert_map.keys():
                occur_time = int(time.mktime(time.strptime(
                    alert_map['IndicationTime'],
                    constant.ALTER_TIME_TYPE)))
                if not query_para or \
                        (int(query_para['begin_time'])
                         <= occur_time
                         <= int(query_para['end_time'])):
                    alert_model = {
                        'alert_id': alert_map['AlertID'],
                        'alert_name': alert_map['AlertID'],
                        'severity': constant.ALERT_SEVERITY
                        [alert_map['PerceivedSeverity']],
                        'category': constants.Category.FAULT,
                        'type': constants.EventType.EQUIPMENT_ALARM,
                        'occur_time': occur_time * 1000,
                        'description': alert_map['Description'],
                        'sequence_number': alert_map['AlertID'],
                        'match_key': hashlib.md5(
                            (alert_map['AlertID']
                             + alert_map['Node']
                             + alert_map['AlertingResource']
                             ).encode()).hexdigest(),
                        'resource_type': constants.DEFAULT_RESOURCE_TYPE,
                        'location': alert_map['ProbableCause']
                        + ':' + alert_map['PossibleEffect']
                    }
                    alert_list.append(alert_model)
        return alert_list

    def list_alerts(self, query_para):
        try:
            # Query the two alarms separately
            alert_list = self.get_alerts(query_para)
            return alert_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage alert from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def clear_alert(self, alert):
        try:
            ssh_command = \
                constant.CLEAR_ALERT_COMMAND + alert['alert_id']
            self.ssh_pool.do_exec(ssh_command)
        except exception.DelfinException as e:
            err_msg = "Failed to clear storage alert from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to clear storage alert from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_disks(self, storage_id):
        disks_list = []
        physicals_list = []
        disks_info = self.ssh_pool.do_exec(
            constant.DISK_SHOW_DETAIL_COMMAND)
        physicals_info = self.ssh_pool.do_exec(
            constant.DISK_SHOW_PHYSICAL_COMMAND)
        error_disk = self.ssh_pool.do_exec(
            constant.DISK_ERROR_COMMAND
        )
        error_disk_list = []
        error_disk_array = self.get_table_data(error_disk)
        for error_disk in error_disk_array:
            error_array = error_disk.split()
            if len(error_array) > 2:
                error_disk_list.append(error_array[0])
        disks_map_list = []
        physical_array = self.get_table_data(physicals_info)
        for physical in physical_array:
            physicals_list.append(physical.split())
        Tools.split_value_map_list(disks_info, disks_map_list, split=':')
        for disks_map in disks_map_list:
            if disks_map and 'Disk' in disks_map.keys():
                speed = physical_type = firmware = None
                logical_type = constant.DISK_LOGICAL.
\ get(disks_map['ContainerType']) """Map disk physical information""" for physical_info in physicals_list: if len(physical_info) > 6 and \ physical_info[0] == disks_map['Disk']: physical_type = \ constant.DISK_TYPE.get(physical_info[1]) speed = physical_info[5] \ if physical_info[5] != '-' else 0 firmware = physical_info[4] status = constants.DiskStatus.NORMAL if disks_map['Disk'] in error_disk_list: status = constants.DiskStatus.ABNORMAL disk_model = { 'name': disks_map['Disk'], 'storage_id': storage_id, 'native_disk_id': disks_map['Disk'], 'serial_number': disks_map['SerialNumber'], 'manufacturer': disks_map['Vendor'], 'model': disks_map['Model'], 'firmware': firmware, 'speed': speed, 'capacity': self.get_size(disks_map['PhysicalSize'], True), 'status': status, 'physical_type': physical_type, 'logical_type': logical_type, 'native_disk_group_id': disks_map['Aggregate'], 'location': None, } disks_list.append(disk_model) return disks_list def get_filesystems(self, storage_id): fs_list = [] fs_info = self.ssh_pool.do_exec( constant.FS_SHOW_DETAIL_COMMAND) thin_fs_info = self.ssh_pool.do_exec( constant.THIN_FS_SHOW_COMMAND) pool_list = self.list_storage_pools(storage_id) thin_fs_array = self.get_table_data(thin_fs_info) fs_map_list = [] Tools.split_value_map_list(fs_info, fs_map_list, split=':') for fs_map in fs_map_list: type = constants.FSType.THICK if fs_map and 'VolumeName' in fs_map.keys(): pool_id = "" """get pool id""" for pool in pool_list: if pool['name'] == fs_map['AggregateName']: pool_id = pool['native_storage_pool_id'] deduplicated = True if fs_map['SpaceSavedbyDeduplication'] == '0B': deduplicated = False if len(thin_fs_array) > 2: for thin_vol in thin_fs_array: thin_array = thin_vol.split() if len(thin_array) > 4: if thin_array[1] == fs_map['VolumeName']: type = constants.VolumeType.THIN compressed = True if fs_map['VolumeContainsSharedorCompressedData'] == \ 'false': compressed = False status = constant.FS_STATUS.get(fs_map['VolumeState']) fs_id = self.get_fs_id( fs_map['VserverName'], fs_map['VolumeName']) fs_model = { 'name': fs_map['VolumeName'], 'storage_id': storage_id, 'native_filesystem_id': fs_id, 'native_pool_id': pool_id, 'compressed': compressed, 'deduplicated': deduplicated, 'worm': constant.WORM_TYPE.get(fs_map['SnapLockType']), 'status': status, 'security_mode': constant.SECURITY_STYLE.get( fs_map['SecurityStyle'], fs_map['SecurityStyle']), 'type': type, 'total_capacity': self.get_size(fs_map['VolumeSize']), 'used_capacity': self.get_size(fs_map['VolumeSize'], True) - self.get_size(fs_map['AvailableSize'], True), 'free_capacity': self.get_size(fs_map['AvailableSize']) } if fs_model['total_capacity'] != '-' \ and fs_model['total_capacity'] > 0: fs_list.append(fs_model) return fs_list def list_controllers(self, storage_id): try: controller_list = [] controller_info = self.ssh_pool.do_exec( constant.CONTROLLER_SHOW_DETAIL_COMMAND) controller_ips = self.ssh_pool.do_exec( constant.CONTROLLER_IP_COMMAND) ips_array = self.get_table_data(controller_ips) ip_map = {} controller_map_list = [] Tools.split_value_map_list( controller_info, controller_map_list, split=':') for controller_map in controller_map_list: if controller_map and 'Node' in controller_map.keys(): for ips in ips_array: ip_array = ips.split() key = value = '' if len(ip_array) == 4: for ip in ip_array: if ip == controller_map['Node']: key = ip if constant.IP_PATTERN.search(ip): value = ip ip_map[key] = value status = constants.ControllerStatus.NORMAL \ if controller_map['Health'] == 'true' \ else 
constants.ControllerStatus.OFFLINE controller_model = { 'name': controller_map['Node'], 'storage_id': storage_id, 'native_controller_id': controller_map['SystemID'], 'status': status, 'location': controller_map['Location'], 'soft_version': None, 'cpu_info': None, 'memory_size': None, 'mgmt_ip': ip_map.get(controller_map['Node']) } controller_list.append(controller_model) return controller_list except exception.DelfinException as e: err_msg = "Failed to get storage controllers from " \ "netapp cmode: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage controllers from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def get_eth_port(self, storage_id): try: eth_list = [] eth_info = self.ssh_pool.do_exec( constant.PORT_SHOW_DETAIL_COMMAND) eth_map_list = [] Tools.split_value_map_list(eth_info, eth_map_list, split=':') for eth_map in eth_map_list: if eth_map and 'Port' in eth_map.keys(): logical_type = constant.ETH_LOGICAL_TYPE.get( eth_map['PortType']) port_id = \ eth_map['Node'] + '_' + eth_map['Port'] eth_model = { 'name': eth_map['Node'] + ':' + eth_map['Port'], 'storage_id': storage_id, 'native_port_id': port_id, 'location': eth_map['Node'] + ':' + eth_map['Port'], 'connection_status': constants.PortConnectionStatus.CONNECTED if eth_map['Link'] == 'up' else constants.PortConnectionStatus.DISCONNECTED, 'health_status': constants.PortHealthStatus.NORMAL if eth_map['PortHealthStatus'] == 'healthy' else constants.PortHealthStatus.ABNORMAL, 'type': constants.PortType.ETH, 'logical_type': logical_type, 'speed': int(eth_map['SpeedOperational']) * units.Mi if eth_map['SpeedOperational'] != '-' else 0, 'max_speed': int(eth_map['SpeedOperational']) * units.Mi if eth_map['SpeedOperational'] != '-' else 0, 'native_parent_id': None, 'wwn': None, 'mac_address': eth_map['MACAddress'], 'ipv4': None, 'ipv4_mask': None, 'ipv6': None, 'ipv6_mask': None, } eth_list.append(eth_model) return eth_list except exception.DelfinException as e: err_msg = "Failed to get storage ports from " \ "netapp cmode: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage ports from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def get_fc_port(self, storage_id): try: fc_list = [] fc_info = self.ssh_pool.do_exec( constant.FC_PORT_SHOW_DETAIL_COMMAND) fc_map_list = [] Tools.split_value_map_list(fc_info, fc_map_list, split=':') for fc_map in fc_map_list: if fc_map and 'Node' in fc_map.keys(): type = constant.FC_TYPE.get(fc_map['PhysicalProtocol']) port_id = \ fc_map['Node'] + '_' + fc_map['Adapter'] fc_model = { 'name': fc_map['Node'] + ':' + fc_map['Adapter'], 'storage_id': storage_id, 'native_port_id': port_id, 'location': fc_map['Node'] + ':' + fc_map['Adapter'], 'connection_status': constants.PortConnectionStatus.CONNECTED if fc_map['AdministrativeStatus'] == 'up' else constants.PortConnectionStatus.DISCONNECTED, 'health_status': constants.PortHealthStatus.NORMAL if fc_map['OperationalStatus'] == 'online' else constants.PortHealthStatus.ABNORMAL, 'type': type, 'logical_type': None, 'speed': int(fc_map['DataLinkRate(Gbit)']) * units.Gi if fc_map['DataLinkRate(Gbit)'] != '-' else 0, 'max_speed': int(fc_map['MaximumSpeed']) * units.Gi if fc_map['MaximumSpeed'] != '-' else 0, 'native_parent_id': None, 'wwn': fc_map['AdapterWWPN'], 'mac_address': None, 'ipv4': None, 'ipv4_mask': None, 'ipv6': None, 
'ipv6_mask': None, } fc_list.append(fc_model) return fc_list except exception.DelfinException as e: err_msg = "Failed to get storage ports from " \ "netapp cmode: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage ports from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def list_ports(self, storage_id): ports_list = \ self.get_fc_port(storage_id) + \ self.get_eth_port(storage_id) return ports_list def list_disks(self, storage_id): try: return self.get_disks(storage_id) except exception.DelfinException as e: err_msg = "Failed to get storage disks from " \ "netapp cmode: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage disks from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def list_qtrees(self, storage_id): try: qt_list = [] qt_info = self.ssh_pool.do_exec( constant.QTREE_SHOW_DETAIL_COMMAND) fs_info = self.ssh_pool.do_exec( constant.FS_SHOW_DETAIL_COMMAND) fs_map_list = [] qt_map_list = [] Tools.split_value_map_list(fs_info, fs_map_list, split=':') Tools.split_value_map_list(qt_info, qt_map_list, split=':') for qt_map in qt_map_list: if qt_map and 'QtreeName' in qt_map.keys(): fs_id = self.get_fs_id(qt_map['VserverName'], qt_map['VolumeName']) qtree_path = None for fs_map in fs_map_list: if fs_map and 'VserverName' in fs_map.keys() \ and fs_id == self.get_fs_id( fs_map['VserverName'], fs_map['VolumeName']) \ and fs_map['JunctionPath'] != '-': qtree_path = fs_map['JunctionPath'] break qt_id = self.get_qt_id( qt_map['VserverName'], qt_map['VolumeName'], qt_map['QtreeName']) qtree_name = qt_map['QtreeName'] if qt_map['QtreeName'] and qtree_path: qtree_path += '/' + qt_map['QtreeName'] qtree_path = qtree_path.replace('//', '/') else: qtree_name = qt_id qt_model = { 'name': qtree_name, 'storage_id': storage_id, 'native_qtree_id': qt_id, 'path': qtree_path, 'native_filesystem_id': fs_id, 'security_mode': qt_map['SecurityStyle'], } qt_list.append(qt_model) return qt_list except exception.DelfinException as err: err_msg = "Failed to get storage qtrees from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise err except Exception as err: err_msg = "Failed to get storage qtrees from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def get_nfs_shares(self, storage_id, qtree_list, protocol_map): try: nfs_info = self.ssh_pool.do_exec( constant.NFS_SHARE_SHOW_COMMAND) nfs_list = [] fs_map_list = [] Tools.split_value_map_list(nfs_info, fs_map_list, split=':') for fs_map in fs_map_list: if fs_map and 'VserverName' in fs_map.keys(): protocol = protocol_map.get(fs_map['VserverName']) if constants.ShareProtocol.NFS in protocol: fs_id = self.get_fs_id(fs_map['VserverName'], fs_map['VolumeName']) share_name = \ fs_map['VserverName'] + '/' + fs_map['VolumeName'] qt_id = self.get_qt_id(fs_map['VserverName'], fs_map['VolumeName'], '') qtree_id = None for qtree in qtree_list: if qtree['native_qtree_id'] == qt_id: qtree_id = qt_id if fs_id == qtree['native_filesystem_id']\ and qtree['name'] != ""\ and qtree['name'] != \ qtree['native_qtree_id']: qt_share_name = \ share_name + '/' + qtree['name'] share = { 'name': qt_share_name, 'storage_id': storage_id, 'native_share_id': qt_share_name + '_' + constants.ShareProtocol.NFS, 'native_qtree_id': qtree['native_qtree_id'], 'native_filesystem_id': 
qtree['native_filesystem_id'], 'path': qtree['path'], 'protocol': constants.ShareProtocol.NFS } nfs_list.append(share) share = { 'name': share_name, 'storage_id': storage_id, 'native_share_id': share_name + '_' + constants.ShareProtocol.NFS, 'native_qtree_id': qtree_id, 'native_filesystem_id': fs_id, 'path': fs_map['JunctionPath'], 'protocol': constants.ShareProtocol.NFS } nfs_list.append(share) return nfs_list except exception.DelfinException as err: err_msg = "Failed to get storage nfs share from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise err except Exception as err: err_msg = "Failed to get storage nfs share from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def get_cifs_shares(self, storage_id, vserver_name, qtree_list, protocol_map): shares_list = [] share_info = self.ssh_pool.do_exec( (constant.CIFS_SHARE_SHOW_DETAIL_COMMAND % {'vserver_name': vserver_name})) share_map_list = [] Tools.split_value_map_list(share_info, share_map_list, split=':') for share_map in share_map_list: if share_map and 'VolumeName' in share_map.keys() and \ share_map['VolumeName'] != '-': protocol_str = protocol_map.get( share_map['Vserver']) fs_id = self.get_fs_id(share_map['Vserver'], share_map['VolumeName']) share_id = fs_id + '_' + share_map['Share'] + '_' qtree_id = None for qtree in qtree_list: name_array = share_map['Path'].split('/') if len(name_array) > 0: qtree_name = name_array[len(name_array) - 1] if qtree_name == share_map['VolumeName']: qtree_name = '' qt_id = self.get_qt_id( share_map['Vserver'], share_map['VolumeName'], qtree_name) else: break if qtree['native_qtree_id'] == qt_id: qtree_id = qt_id break if constants.ShareProtocol.CIFS in protocol_str: share = { 'name': share_map['Share'], 'storage_id': storage_id, 'native_share_id': share_id + constants.ShareProtocol.CIFS, 'native_qtree_id': qtree_id, 'native_filesystem_id': fs_id, 'path': share_map['Path'], 'protocol': constants.ShareProtocol.CIFS } shares_list.append(share) return shares_list def list_shares(self, storage_id): try: shares_list = [] qtree_list = self.list_qtrees(None) protocol_info = self.ssh_pool.do_exec( constant.SHARE_AGREEMENT_SHOW_COMMAND) protocol_map = {} protocol_arr = self.get_table_data(protocol_info) for protocol in protocol_arr: agr_arr = protocol.split() if len(agr_arr) > 1: protocol_map[agr_arr[0]] = agr_arr[1] vserver_info = self.ssh_pool.do_exec( constant.VSERVER_SHOW_COMMAND) vserver_array = self.get_table_data(vserver_info) for vserver in vserver_array: vserver_name = vserver.split() if len(vserver_name) > 1: shares_list += self.get_cifs_shares( storage_id, vserver_name[0], qtree_list, protocol_map) shares_list += self.get_nfs_shares( storage_id, qtree_list, protocol_map) return shares_list except exception.DelfinException as err: err_msg = "Failed to get storage shares from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise err except Exception as err: err_msg = "Failed to get storage shares from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def list_filesystems(self, storage_id): try: fs_list = self.get_filesystems(storage_id) return fs_list except exception.DelfinException as e: err_msg = "Failed to get storage volume from " \ "netapp cmode: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage volume from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise 
exception.InvalidResults(err_msg)

    def list_quotas(self, storage_id):
        try:
            quota_list = []
            quotas_info = self.ssh_pool.do_exec(
                constant.QUOTA_SHOW_DETAIL_COMMAND)
            quota_map_list = []
            Tools.split_value_map_list(quotas_info, quota_map_list, ":")
            for quota_map in quota_map_list:
                user_group_name = None
                if quota_map and 'VolumeName' in quota_map.keys():
                    quota_id = \
                        quota_map['Vserver'] + '_' + \
                        quota_map['VolumeName'] + '_' + \
                        quota_map['Type'] + '_' + \
                        quota_map['QtreeName'] + '_' + \
                        quota_map['Target']
                    type = constant.QUOTA_TYPE.get(quota_map['Type'])
                    qt_id = self.get_qt_id(
                        quota_map['Vserver'],
                        quota_map['VolumeName'], '')
                    if type == 'tree' and quota_map['Target'] != '':
                        qt_id += '/' + quota_map['Target']
                    else:
                        # Only user/group quotas carry a user or group target
                        if type == 'user' or type == 'group':
                            user_group_name = quota_map['Target']
                        if quota_map['QtreeName'] != '':
                            qt_id += '/' + quota_map['QtreeName']
                    fs_id = self.get_fs_id(quota_map['Vserver'],
                                           quota_map['VolumeName'])
                    quota = {
                        'native_quota_id': quota_id,
                        'type': type,
                        'storage_id': storage_id,
                        'native_filesystem_id': fs_id,
                        'native_qtree_id': qt_id,
                        'capacity_hard_limit':
                            self.get_size(quota_map['DiskLimit']),
                        'capacity_soft_limit':
                            self.get_size(quota_map['SoftDiskLimit']),
                        'file_hard_limit':
                            int(quota_map['FilesLimit'])
                            if quota_map['FilesLimit'] != '-' else '-',
                        'file_soft_limit':
                            int(quota_map['SoftFilesLimit'])
                            if quota_map['SoftFilesLimit'] != '-' else '-',
                        'file_count': None,
                        'used_capacity': None,
                        'user_group_name': user_group_name
                    }
                    quota_list.append(quota)
            return quota_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage quotas from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage quotas from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def get_alert_sources(self):
        try:
            ip_list = []
            mgt_ip = self.ssh_pool.do_exec(constant.MGT_IP_COMMAND)
            controller_list = self.list_controllers(None)
            for controller in controller_list:
                ip_list.append({'host': controller['mgmt_ip']})
            mgt_ip_array = self.get_table_data(mgt_ip)
            ip_list.append({'host': mgt_ip_array[0].split()[2]})
            return ip_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage ip from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage ip from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def do_rest_call(self, url, data):
        try:
            res = self.rest_client.do_call(
                url, data, 'GET', constant.SOCKET_TIMEOUT)
            if res.status_code == constant.RETURN_SUCCESS_CODE \
                    or res.status_code == constant.CREATED_SUCCESS_CODE \
                    or res.status_code == constant.ACCEPTED_RETURN_CODE:
                result_json = res.json()
                return result_json.get('records')
            elif res.status_code == constant.BAD_REQUEST_RETURN_CODE:
                raise exception.BadRequest()
            elif res.status_code == constant.UNAUTHORIZED_RETURN_CODE:
                raise exception.NotAuthorized()
            elif res.status_code == constant.FORBIDDEN_RETURN_CODE:
                raise exception.InvalidUsernameOrPassword()
            elif res.status_code == constant.NOT_FOUND_RETURN_CODE:
                LOG.error('Url did not get results url:%s' % url)
                return []
            elif res.status_code == constant.METHOD_NOT_ALLOWED_CODE:
                raise exception.Invalid()
            elif res.status_code == constant.CONFLICT_RETURN_CODE:
                raise exception.Invalid()
            elif res.status_code == constant.INTERNAL_ERROR_CODE:
                raise exception.BadResponse()
        except exception.DelfinException as e:
            err_msg = "Failed to rest call
from " \ "netapp cmode: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to rest call from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def collect_perf_metrics(self, storage_id, resource_metrics, start_time, end_time): try: metrics = [] if start_time and end_time: metrics_keys = resource_metrics.keys() # storage metrics if constants.ResourceType.STORAGE in metrics_keys: metrics.extend( self.get_storage_perf( resource_metrics, storage_id, start_time, end_time)) # pool metrics if constants.ResourceType.STORAGE_POOL in metrics_keys: metrics.extend( self.get_pool_perf( resource_metrics, storage_id, start_time, end_time)) # volume metrics if constants.ResourceType.VOLUME in metrics_keys: metrics.extend( self.get_volume_perf( resource_metrics, storage_id, start_time, end_time)) # port metrics if constants.ResourceType.PORT in metrics_keys: metrics.extend( self.get_port_perf( resource_metrics, storage_id, start_time, end_time)) # filesystem metrics if constants.ResourceType.FILESYSTEM in metrics_keys: metrics.extend( self.get_fs_perf( resource_metrics, storage_id, start_time, end_time)) return metrics except exception.DelfinException as e: err_msg = "Failed to get storage performance from " \ "netapp cmode: %s" % (six.text_type(e)) LOG.error(err_msg) raise e except Exception as err: err_msg = "Failed to get storage performance from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) def get_storage_perf(self, metrics, storage_id, start_time, end_time): json_info = self.do_rest_call(constant.CLUSTER_PERF_URL, None) if json_info: system_info = self.ssh_pool.do_exec( constant.CLUSTER_SHOW_COMMAND) storage_map_list = [] Tools.split_value_map_list( system_info, storage_map_list, split=':') storage = storage_map_list[-1] storage_metrics = PerformanceHandler.\ get_perf_value(metrics, storage_id, start_time, end_time, json_info, storage['ClusterUUID'] + ':' + storage['ClusterSerialNumber'], storage['ClusterName'], constants.ResourceType.STORAGE) return storage_metrics return [] def get_pool_perf(self, metrics, storage_id, start_time, end_time): agg_info = self.ssh_pool.do_exec( constant.AGGREGATE_SHOW_DETAIL_COMMAND) agg_map_list = [] pool_metrics = [] Tools.split_value_map_list(agg_info, agg_map_list, split=':') for agg_map in agg_map_list: if 'UUIDString' in agg_map: uuid = agg_map['UUIDString'] json_info = self.do_rest_call( constant.POOL_PERF_URL % uuid, None) pool_metrics.extend( PerformanceHandler.get_perf_value( metrics, storage_id, start_time, end_time, json_info, agg_map['UUIDString'], agg_map['Aggregate'], constants.ResourceType.STORAGE_POOL)) return pool_metrics def get_volume_perf(self, metrics, storage_id, start_time, end_time): volume_info = \ self.ssh_pool.do_exec(constant.LUN_SHOW_DETAIL_COMMAND) volume_map_list = [] volume_metrics = [] Tools.split_value_map_list(volume_info, volume_map_list, split=':') for volume in volume_map_list: if 'LUNUUID' in volume: uuid = volume['LUNUUID'] json_info = self.do_rest_call( constant.VOLUME_PERF_URL % uuid, None) volume_metrics.extend( PerformanceHandler.get_perf_value( metrics, storage_id, start_time, end_time, json_info, volume['SerialNumber'], volume['LUNName'], constants.ResourceType.VOLUME)) return volume_metrics def get_fs_perf(self, metrics, storage_id, start_time, end_time): fs_info = self.do_rest_call( constant.FS_INFO_URL, {}) fs_metrics = [] for fs in fs_info: if 'uuid' in fs: uuid = 
fs['uuid'] json_info = self.do_rest_call( constant.FS_PERF_URL % uuid, None) fs_id = self.get_fs_id( fs['svm']['name'], fs['name']) fs_metrics.extend( PerformanceHandler.get_perf_value( metrics, storage_id, start_time, end_time, json_info, fs_id, fs['name'], constants.ResourceType.FILESYSTEM)) return fs_metrics def get_port_perf(self, metrics, storage_id, start_time, end_time): fc_port = self.do_rest_call(constant.FC_INFO_URL, None) port_metrics = [] for fc in fc_port: if 'uuid' in fc: uuid = fc['uuid'] json_info = self.do_rest_call( constant.FC_PERF_URL % uuid, None) port_id = fc['node']['name'] + '_' + fc['name'] port_metrics.extend( PerformanceHandler.get_perf_value( metrics, storage_id, start_time, end_time, json_info, port_id, fc['name'], constants.ResourceType.PORT)) eth_port = self.do_rest_call(constant.ETH_INFO_URL, {}) for eth in eth_port: if 'uuid' in eth: uuid = eth['uuid'] json_info = self.do_rest_call( constant.ETH_PERF_URL % uuid, None) port_id = eth['node']['name'] + '_' + eth['name'] port_metrics.extend( PerformanceHandler.get_perf_value( metrics, storage_id, start_time, end_time, json_info, port_id, eth['name'], constants.ResourceType.PORT)) return port_metrics def get_storage_version(self): version_info = self.ssh_pool.do_exec( constant.VERSION_SHOW_COMMAND) version_array = version_info.split("\r\n") for version in version_array: if 'NetApp' in version: storage_version = version.split(":") version_list = \ re.findall(constant.FLOAT_PATTERN, storage_version[0]) for ver_info in version_list: if float(ver_info) >= 9.0: return float(ver_info) return 9.0 @staticmethod def get_cap_by_version(version, capabilities): if version >= 9.6: capabilities['resource_metrics']['storage'] = \ constant.STORAGE_CAPABILITIES if version >= 9.7: capabilities['resource_metrics']['storagePool'] = \ constant.POOL_CAPABILITIES capabilities['resource_metrics']['port'] = \ constant.PORT_CAPABILITIES capabilities['resource_metrics']['filesystem'] = \ constant.FS_CAPABILITIES if version >= 9.8: capabilities['resource_metrics']['volume'] = \ constant.VOLUME_CAPABILITIES return capabilities @staticmethod def get_capabilities(filters): if filters: capabilities = { 'is_historic': True, 'resource_metrics': {} } version_List = \ re.findall( constant.FLOAT_PATTERN, filters.get('firmware_version')) version = 9.0 for ver_info in version_List: if float(ver_info) >= 9.0: version = float(ver_info) break NetAppHandler.get_cap_by_version(version, capabilities) return capabilities cap_map = {} for i in range(0, 10): capabilities = { 'is_historic': True, 'resource_metrics': {} } version = float('9.' 
+ str(i))
            NetAppHandler.get_cap_by_version(version, capabilities)
            cap_map[version] = capabilities
        return cap_map

    def get_latest_perf_timestamp(self):
        try:
            timestamp = 0
            json_info = self.do_rest_call(constant.CLUSTER_PERF_URL, None)
            for perf_info in json_info:
                occur_time = \
                    int(time.mktime(time.strptime(
                        perf_info.get('timestamp'),
                        PerformanceHandler.TIME_TYPE)))
                second_offset = \
                    (time.mktime(time.localtime()) -
                     time.mktime(time.gmtime()))
                occur_time = \
                    (occur_time + int(second_offset)) * 1000
                if timestamp < occur_time:
                    timestamp = occur_time
            if timestamp == 0:
                return None
            return timestamp
        except exception.DelfinException as e:
            err_msg = "Failed to get storage perf timestamp from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage perf timestamp from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_storage_host_initiators(self, storage_id):
        try:
            initiator_list = []
            iscsi_initiator_info = self.ssh_pool.do_exec(
                constant.ISCSI_INITIATOR_COMMAND)
            fc_initiator_info = self.ssh_pool.do_exec(
                constant.FC_INITIATOR_COMMAND)
            new_initiator_info = self.ssh_pool.do_exec(
                constant.HOST_COMMAND)
            MappingHandler.format_initiators(
                initiator_list, new_initiator_info,
                storage_id, '', is_default=True)
            MappingHandler.format_initiators(
                initiator_list, iscsi_initiator_info,
                storage_id, constants.InitiatorType.ISCSI)
            MappingHandler.format_initiators(
                initiator_list, fc_initiator_info,
                storage_id, constants.InitiatorType.FC)
            return initiator_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage initiators from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage initiators from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_storage_hosts(self, storage_id):
        try:
            host_info = self.ssh_pool.do_exec(constant.HOST_COMMAND)
            return MappingHandler.format_host(host_info, storage_id)
        except exception.DelfinException as e:
            err_msg = "Failed to get storage hosts from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage hosts from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_port_groups(self, storage_id):
        try:
            port_set_info = self.ssh_pool.do_exec(
                constant.PORT_GROUP_COMMAND)
            lif_info = self.ssh_pool.do_exec(
                constant.LIF_COMMAND)
            return MappingHandler.format_port_group(port_set_info,
                                                    lif_info,
                                                    storage_id)
        except exception.DelfinException as e:
            err_msg = "Failed to get storage port groups from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage port groups from " \
                      "netapp cmode: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    def list_masking_views(self, storage_id):
        try:
            mapping_info = self.ssh_pool.do_exec(
                constant.LUN_MAPPING_COMMAND)
            volume_info = self.ssh_pool.do_exec(
                constant.LUN_SHOW_DETAIL_COMMAND)
            host_list = self.list_storage_hosts(None)
            return MappingHandler.format_mapping_view(mapping_info,
                                                      volume_info,
                                                      storage_id,
                                                      host_list)
        except exception.DelfinException as e:
            err_msg = "Failed to get storage masking views from " \
                      "netapp cmode: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage
masking views from " \ "netapp cmode: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) ================================================ FILE: delfin/drivers/netapp/dataontap/performance_handler.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time from oslo_log import log from delfin.common import constants from delfin.drivers.netapp.dataontap import constants as constant from delfin.drivers.utils.tools import Tools LOG = log.getLogger(__name__) class PerformanceHandler(object): TIME_TYPE = '%Y-%m-%dT%H:%M:%SZ' @staticmethod def get_value(value, key): if key == 'iops' or key == 'readIops' or key == 'writeIops': return int(value) elif key == 'throughput' or key == 'readThroughput' \ or key == 'writeThroughput': unit = constant.CAP_MAP[key]['unit'] return PerformanceHandler.get_unit_size(value, unit) elif key == 'responseTime': return round(int(value) / 1000, 3) else: return value @staticmethod def get_unit_size(value, unit): if value is None: return None if value == '0' or value == 0: return 0 unit_array = unit.split('/') capacity = Tools.change_capacity_to_bytes(unit_array[0]) if capacity == 1: return value return round(int(value) / capacity, 3) @staticmethod def get_perf_value(metrics, storage_id, start_time, end_time, data_info, resource_id, resource_name, resource_type): fs_metrics = [] selection = metrics.get(resource_type) for key in selection: labels = { 'storage_id': storage_id, 'resource_type': resource_type, 'resource_id': resource_id, 'resource_name': resource_name, 'type': 'RAW', 'unit': constant.CAP_MAP[key]['unit'] } values = {} for perf_info in data_info: if perf_info.get('timestamp'): occur_time = \ int(time.mktime(time.strptime( perf_info.get('timestamp'), PerformanceHandler.TIME_TYPE))) second_offset = \ (time.mktime(time.localtime()) - time.mktime(time.gmtime())) timestamp = \ (occur_time + int(second_offset)) * 1000 if int(start_time) <= timestamp <= int(end_time) \ and timestamp % 60000 == 0: key_list = constant.PERF_MAP.get(key, []) if len(key_list) > 0: value = perf_info.get(key_list[0], {}) \ .get(key_list[1], None) if value is not None: value = PerformanceHandler. \ get_value(value, key) values[timestamp] = value if values: m = constants.metric_struct(name=key, labels=labels, values=values) fs_metrics.append(m) return fs_metrics ================================================ FILE: delfin/drivers/pure/__init__.py ================================================ ================================================ FILE: delfin/drivers/pure/flasharray/__init__.py ================================================ ================================================ FILE: delfin/drivers/pure/flasharray/consts.py ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin.common import constants # The default capacity DEFAULT_CAPACITY = 0 # The default speed DEFAULT_SPEED = 0 # The default list_alerts time conversion DEFAULT_LIST_ALERTS_TIME_CONVERSION = 1000 # The default count for the get_volumes_info function DEFAULT_COUNT_GET_VOLUMES_INFO = 0 # Number of re-logins RE_LOGIN_TIMES = 3 # Constant one CONSTANT_ONE = 1 # Constant zero CONSTANT_ZERO = 0 # Success status code SUCCESS_STATUS_CODE = 200 # Status code of no permission PERMISSION_DENIED_STATUS_CODE = 401 # Custom token of Pure CUSTOM_TOKEN = 'x-next-token' # The default get_storage model CONTROLLER_PRIMARY = 'primary' # Normal value of the controller status NORMAL_CONTROLLER_STATUS = 'ready' # disk type DISK_TYPE_NVRAM = 'NVRAM' # The account password is incorrect during login. LOGIN_PASSWORD_ERR = 'invalid credentials' # list_port: insert ":" into the WWN every 2 characters. SPLICE_WWN_SERIAL = 2 SPLICE_WWN_COLON = ':' SEVERITY_MAP = {'fatal': constants.Severity.FATAL, 'critical': constants.Severity.CRITICAL, 'major': constants.Severity.MAJOR, 'minor': constants.Severity.MINOR, 'warning': constants.Severity.WARNING, 'informational': constants.Severity.INFORMATIONAL, 'NotSpecified': constants.Severity.NOT_SPECIFIED} CATEGORY_MAP = {'fault': constants.Category.FAULT, 'event': constants.Category.EVENT, 'recovery': constants.Category.RECOVERY, 'notSpecified': constants.Category.NOT_SPECIFIED} CONTROLLER_STATUS_MAP = {'normal': constants.ControllerStatus.NORMAL, 'ok': constants.ControllerStatus.NORMAL, 'offline': constants.ControllerStatus.OFFLINE, 'not_installed': constants.ControllerStatus.OFFLINE, 'fault': constants.ControllerStatus.FAULT, 'degraded': constants.ControllerStatus.DEGRADED, 'unready': constants.ControllerStatus.UNKNOWN} DISK_STATUS_MAP = {'normal': constants.DiskStatus.NORMAL, 'healthy': constants.DiskStatus.NORMAL, 'abnormal': constants.DiskStatus.ABNORMAL, 'unhealthy': constants.DiskStatus.ABNORMAL, 'offline': constants.DiskStatus.OFFLINE} PORT_STATUS_MAP = {'ok': constants.PortHealthStatus.NORMAL, 'not_installed': constants.PortHealthStatus.ABNORMAL } PARSE_ALERT_ALERT_ID = '1.3.6.1.2.1.1.3.0' PARSE_ALERT_STORAGE_NAME = '1.3.6.1.4.1.40482.3.1' PARSE_ALERT_CONTROLLER_NAME = '1.3.6.1.4.1.40482.3.3' PARSE_ALERT_ALERT_NAME = '1.3.6.1.4.1.40482.3.5' PARSE_ALERT_DESCRIPTION = '1.3.6.1.4.1.40482.3.6' PARSE_ALERT_SEVERITY = '1.3.6.1.4.1.40482.3.7' PARSE_ALERT_SEVERITY_MAP = {'1': constants.Severity.WARNING, '2': constants.Severity.INFORMATIONAL} # collect_perf_metrics method SIXTY = 60 LIST_METRICS = -1 STORAGE_CAP = { constants.StorageMetric.IOPS.name: { "unit": constants.StorageMetric.IOPS.unit, "description": constants.StorageMetric.IOPS.description }, constants.StorageMetric.READ_IOPS.name: { "unit": constants.StorageMetric.READ_IOPS.unit, "description": constants.StorageMetric.READ_IOPS.description }, constants.StorageMetric.WRITE_IOPS.name: { "unit": constants.StorageMetric.WRITE_IOPS.unit, "description": constants.StorageMetric.WRITE_IOPS.description }, constants.StorageMetric.THROUGHPUT.name: { "unit": constants.StorageMetric.THROUGHPUT.unit, "description": constants.StorageMetric.THROUGHPUT.description }, constants.StorageMetric.READ_THROUGHPUT.name: { "unit": constants.StorageMetric.READ_THROUGHPUT.unit, "description": constants.StorageMetric.READ_THROUGHPUT.description }, constants.StorageMetric.WRITE_THROUGHPUT.name: { "unit": constants.StorageMetric.WRITE_THROUGHPUT.unit, "description": constants.StorageMetric.WRITE_THROUGHPUT.description }, constants.StorageMetric.READ_RESPONSE_TIME.name: { "unit": constants.StorageMetric.READ_RESPONSE_TIME.unit, "description": constants.StorageMetric.READ_RESPONSE_TIME.description }, constants.StorageMetric.WRITE_RESPONSE_TIME.name: { "unit": constants.StorageMetric.WRITE_RESPONSE_TIME.unit, "description": constants.StorageMetric.WRITE_RESPONSE_TIME.description } } VOLUME_CAP = { constants.VolumeMetric.IOPS.name: { "unit": constants.VolumeMetric.IOPS.unit, "description": constants.VolumeMetric.IOPS.description }, constants.VolumeMetric.READ_IOPS.name: { "unit": constants.VolumeMetric.READ_IOPS.unit, "description": constants.VolumeMetric.READ_IOPS.description }, constants.VolumeMetric.WRITE_IOPS.name: { "unit": constants.VolumeMetric.WRITE_IOPS.unit, "description": constants.VolumeMetric.WRITE_IOPS.description }, constants.VolumeMetric.THROUGHPUT.name: { "unit": constants.VolumeMetric.THROUGHPUT.unit, "description": constants.VolumeMetric.THROUGHPUT.description }, constants.VolumeMetric.READ_THROUGHPUT.name: { "unit": constants.VolumeMetric.READ_THROUGHPUT.unit, "description": constants.VolumeMetric.READ_THROUGHPUT.description }, constants.VolumeMetric.WRITE_THROUGHPUT.name: { "unit": constants.VolumeMetric.WRITE_THROUGHPUT.unit, "description": constants.VolumeMetric.WRITE_THROUGHPUT.description }, constants.VolumeMetric.READ_RESPONSE_TIME.name: { "unit": constants.VolumeMetric.READ_RESPONSE_TIME.unit, "description": constants.VolumeMetric.READ_RESPONSE_TIME.description }, constants.VolumeMetric.WRITE_RESPONSE_TIME.name: { "unit": constants.VolumeMetric.WRITE_RESPONSE_TIME.unit, "description": constants.VolumeMetric.WRITE_RESPONSE_TIME.description } } # Timestamp format conversion PURE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ' HOST_OS_TYPES_MAP = { 'linux': constants.HostOSTypes.LINUX, 'windows': constants.HostOSTypes.WINDOWS, 'solaris': constants.HostOSTypes.SOLARIS, 'hp-ux': constants.HostOSTypes.HP_UX, 'hpux': constants.HostOSTypes.HP_UX, 'aix': constants.HostOSTypes.AIX, 'xenserver': constants.HostOSTypes.XEN_SERVER, 'vmware esx': constants.HostOSTypes.VMWARE_ESX, 'esxi': constants.HostOSTypes.VMWARE_ESX, 'linux_vis': constants.HostOSTypes.LINUX_VIS, 'windows server 2012': constants.HostOSTypes.WINDOWS_SERVER_2012, 'oracle vm': constants.HostOSTypes.ORACLE_VM, 'oracle-vm-server': constants.HostOSTypes.ORACLE_VM, 'open vms': constants.HostOSTypes.OPEN_VMS, 'vms': constants.HostOSTypes.OPEN_VMS, 'unknown': constants.HostOSTypes.UNKNOWN } ================================================ FILE: delfin/drivers/pure/flasharray/pure_flasharray.py ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. import datetime import hashlib import time from oslo_log import log from oslo_utils import units from delfin import exception, utils from delfin.common import constants from delfin.drivers import driver from delfin.drivers.pure.flasharray import rest_handler, consts from delfin.i18n import _ LOG = log.getLogger(__name__) class PureFlashArrayDriver(driver.StorageDriver): def __init__(self, **kwargs): super().__init__(**kwargs) self.rest_handler = rest_handler.RestHandler(**kwargs) self.rest_handler.login() def list_volumes(self, context): list_volumes = [] volumes = self.rest_handler.get_volumes() if volumes: for volume in volumes: volume_name = volume.get('name') total_capacity = int(volume.get('size', consts.DEFAULT_CAPACITY)) used_capacity = int(volume.get('volumes', consts.DEFAULT_CAPACITY)) volume_dict = { 'native_volume_id': volume_name, 'name': volume_name, 'total_capacity': total_capacity, 'used_capacity': used_capacity, 'free_capacity': total_capacity - used_capacity, 'storage_id': self.storage_id, 'status': constants.StorageStatus.NORMAL, 'type': constants.VolumeType.THIN if volume.get('thin_provisioning') is not None else constants.VolumeType.THICK } list_volumes.append(volume_dict) return list_volumes def add_trap_config(self, context, trap_config): pass def clear_alert(self, context, alert): pass def get_storage(self, context): storages = self.rest_handler.rest_call( self.rest_handler.REST_STORAGE_URL) total_capacity = None used_capacity = None if storages: for storage in storages: used_capacity = int(storage.get('total', consts.DEFAULT_CAPACITY)) total_capacity = int(storage.get('capacity', consts.DEFAULT_CAPACITY)) break raw_capacity = consts.DEFAULT_CAPACITY disks = self.list_disks(context) if disks: for disk in disks: raw_capacity = raw_capacity + disk.get('capacity') arrays = self.rest_handler.rest_call(self.rest_handler.REST_ARRAY_URL) storage_name = None serial_number = None version = None if arrays: storage_name = arrays.get('array_name') serial_number = arrays.get('id') version = arrays.get('version') model = None status = constants.StorageStatus.NORMAL controllers = self.rest_handler.rest_call( self.rest_handler.REST_CONTROLLERS_URL) if controllers: for controller in controllers: if controller.get('mode') == consts.CONTROLLER_PRIMARY: model = controller.get('model') if controller.get('status') != \ consts.NORMAL_CONTROLLER_STATUS: status = constants.StorageStatus.ABNORMAL if not all((storages, arrays, controllers)): LOG.error('get_storage error, Unable to obtain data.') raise exception.StorageBackendException('Unable to obtain data') storage_result = { 'model': model, 'total_capacity': total_capacity, 'raw_capacity': raw_capacity, 'used_capacity': used_capacity, 'free_capacity': total_capacity - used_capacity, 'vendor': 'PURE', 'name': storage_name, 'serial_number': serial_number, 'firmware_version': version, 'status': status } return storage_result def list_alerts(self, context, query_para=None): alerts = self.rest_handler.rest_call(self.rest_handler.REST_ALERTS_URL) alerts_list = [] if alerts: for alert in alerts: alerts_model = dict() opened = alert.get('opened') time_difference = self.get_time_difference() timestamp = (int(datetime.datetime.strptime( opened, consts.PURE_TIME_FORMAT).timestamp() + time_difference) * consts. 
DEFAULT_LIST_ALERTS_TIME_CONVERSION)\ if opened is not None else None if query_para is not None: try: if timestamp is None or timestamp \ < int(query_para.get('begin_time')) or \ timestamp > int(query_para.get('end_time')): continue except Exception as e: LOG.error(e) alerts_model['occur_time'] = timestamp alerts_model['alert_id'] = alert.get('id') alerts_model['severity'] = consts.SEVERITY_MAP.get( alert.get('current_severity'), constants.Severity.NOT_SPECIFIED) alerts_model['category'] = constants.Category.FAULT component_name = alert.get('component_name') alerts_model['location'] = component_name alerts_model['type'] = constants.EventType.EQUIPMENT_ALARM alerts_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE event = alert.get('event') alerts_model['alert_name'] = event alerts_model['match_key'] = hashlib.md5(str(alert.get('id')). encode()).hexdigest() alerts_model['description'] = '({}:{}): {}'. \ format(alert.get('component_type'), component_name, event) alerts_list.append(alerts_model) return alerts_list @staticmethod def get_time_difference(): time_difference = time.mktime( time.localtime()) - time.mktime(time.gmtime()) return time_difference @staticmethod def parse_alert(context, alert): try: alert_model = dict() alert_model['alert_id'] = alert.get(consts.PARSE_ALERT_ALERT_ID) alert_model['severity'] = consts.PARSE_ALERT_SEVERITY_MAP.get( alert.get(consts.PARSE_ALERT_SEVERITY), constants.Severity.NOT_SPECIFIED) alert_model['category'] = constants.Category.FAULT alert_model['occur_time'] = utils.utcnow_ms() alert_model['description'] = '({}:{}): {}'.format(alert.get( consts.PARSE_ALERT_STORAGE_NAME), alert.get(consts.PARSE_ALERT_CONTROLLER_NAME), alert.get(consts.PARSE_ALERT_DESCRIPTION)) alert_model['location'] = alert.get( consts.PARSE_ALERT_CONTROLLER_NAME) alert_model['type'] = constants.EventType.EQUIPMENT_ALARM alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE alert_model['alert_name'] = alert.get( consts.PARSE_ALERT_ALERT_NAME) alert_model['sequence_number'] = alert.get( consts.PARSE_ALERT_ALERT_ID) alert_model['match_key'] = hashlib.md5(str(alert.get( consts.PARSE_ALERT_ALERT_ID)).encode()).hexdigest() return alert_model except Exception as e: LOG.error(e) msg = (_("Failed to build alert model as some attributes missing")) raise exception.InvalidResults(msg) def list_controllers(self, context): list_controllers = [] controllers = self.rest_handler.rest_call( self.rest_handler.REST_CONTROLLERS_URL) hardware = self.get_hardware() if controllers: for controller in controllers: controllers_dict = dict() controller_name = controller.get('name') controllers_dict['name'] = controller_name controllers_dict['status'] = consts.CONTROLLER_STATUS_MAP.get( hardware.get(controller_name, {}).get('status'), constants.ControllerStatus.UNKNOWN) controllers_dict['soft_version'] = controller.get('version') controllers_dict['storage_id'] = self.storage_id controllers_dict['native_controller_id'] = controller_name controllers_dict['location'] = controller_name list_controllers.append(controllers_dict) return list_controllers def list_disks(self, context): hardware_dict = self.get_hardware() list_disks = [] disks = self.rest_handler.rest_call(self.rest_handler.REST_DISK_URL) if disks: for disk in disks: disk_type = disk.get('type') if disk_type == consts.DISK_TYPE_NVRAM or disk_type is None: continue disk_dict = dict() drive_name = disk.get('name') disk_dict['name'] = drive_name physical_type = disk_type.lower() if disk_type is not None \ else None disk_dict['physical_type'] = 
physical_type \ if physical_type in constants.DiskPhysicalType.ALL else \ constants.DiskPhysicalType.UNKNOWN disk_dict['status'] = consts.DISK_STATUS_MAP. \ get(disk.get('status'), constants.DiskStatus.OFFLINE) disk_dict['storage_id'] = self.storage_id disk_dict['capacity'] = int(disk.get('capacity', consts.DEFAULT_CAPACITY)) hardware_object = hardware_dict.get(drive_name, {}) speed = hardware_object.get('speed') disk_dict['speed'] = int(speed) if speed is not None else None disk_dict['model'] = hardware_object.get('model') disk_dict['serial_number'] = hardware_object. \ get('serial_number') disk_dict['native_disk_id'] = drive_name disk_dict['location'] = drive_name disk_dict['manufacturer'] = "PURE" disk_dict['firmware'] = "" list_disks.append(disk_dict) return list_disks def get_hardware(self): hardware_dict = dict() hardware = self.rest_handler.rest_call( self.rest_handler.REST_HARDWARE_URL) if hardware: for hardware_value in hardware: hardware_map = dict() hardware_map['speed'] = hardware_value.get('speed') hardware_map['serial_number'] = hardware_value.get('serial') hardware_map['model'] = hardware_value.get('model') hardware_map['status'] = hardware_value.get('status') hardware_dict[hardware_value.get('name')] = hardware_map return hardware_dict def list_ports(self, context): list_ports = [] networks = self.get_network() ports = self.get_ports() hardware_dict = self.rest_handler.rest_call( self.rest_handler.REST_HARDWARE_URL) if not hardware_dict: return list_ports for hardware in hardware_dict: hardware_result = dict() hardware_name = hardware.get('name') if 'FC' in hardware_name: hardware_result['type'] = constants.PortType.FC elif 'ETH' in hardware_name: hardware_result['type'] = constants.PortType.ETH elif 'SAS' in hardware_name: hardware_result['type'] = constants.PortType.SAS else: continue hardware_result['name'] = hardware_name hardware_result['native_port_id'] = hardware_name hardware_result['storage_id'] = self.storage_id hardware_result['location'] = hardware_name speed = hardware.get('speed') if speed is None: hardware_result['connection_status'] = \ constants.PortConnectionStatus.UNKNOWN elif speed == consts.CONSTANT_ZERO: hardware_result['connection_status'] = \ constants.PortConnectionStatus.DISCONNECTED hardware_result['speed'] = speed else: hardware_result['connection_status'] = \ constants.PortConnectionStatus.CONNECTED hardware_result['speed'] = int(speed) hardware_result['health_status'] = consts.PORT_STATUS_MAP.get( hardware.get('status'), constants.PortHealthStatus.UNKNOWN) port = ports.get(hardware_name) if port: hardware_result['wwn'] = port.get('wwn') network = networks.get(hardware_name) if network: hardware_result['mac_address'] = network.get('mac_address') hardware_result['logical_type'] = network.get('logical_type') hardware_result['ipv4_mask'] = network.get('ipv4_mask') hardware_result['ipv4'] = network.get('ipv4') list_ports.append(hardware_result) return list_ports def get_network(self): networks_object = dict() networks = self.rest_handler.rest_call( self.rest_handler.REST_NETWORK_URL) if networks: for network in networks: network_dict = dict() network_dict['mac_address'] = network.get('hwaddr') services_list = network.get('services') if services_list: for services in services_list: network_dict['logical_type'] = services if \ services in constants.PortLogicalType.ALL else None break network_dict['ipv4_mask'] = network.get('netmask') network_dict['ipv4'] = network.get('address') network_name = network.get('name').upper() networks_object[network_name] 
= network_dict return networks_object def get_ports(self): ports_dict = dict() ports = self.rest_handler.rest_call(self.rest_handler.REST_PORT_URL) if ports: for port in ports: port_dict = dict() port_name = port.get('name') wwn = port.get('wwn') port_dict['wwn'] = self.get_splice_wwn(wwn) \ if wwn is not None else port.get('iqn') ports_dict[port_name] = port_dict return ports_dict @staticmethod def get_splice_wwn(wwn): wwn_list = list(wwn) wwn_splice = wwn_list[0] for serial in range(1, len(wwn_list)): if serial % consts.SPLICE_WWN_SERIAL == consts.CONSTANT_ZERO: wwn_splice = '{}{}'.format(wwn_splice, consts.SPLICE_WWN_COLON) wwn_splice = '{}{}'.format(wwn_splice, wwn_list[serial]) return wwn_splice def list_storage_pools(self, context): return [] def remove_trap_config(self, context, trap_config): pass def reset_connection(self, context, **kwargs): self.rest_handler.logout() self.rest_handler.login() @staticmethod def get_access_url(): return 'https://{ip}' def collect_perf_metrics(self, context, storage_id, resource_metrics, start_time, end_time): LOG.info('The system(storage_id: %s) starts to collect storage and' ' volume performance, start_time: %s, end_time: %s', storage_id, start_time, end_time) metrics = [] if resource_metrics.get(constants.ResourceType.STORAGE): storage_metrics = self.get_storage_metrics( storage_id, resource_metrics.get(constants.ResourceType.STORAGE), start_time, end_time, constants.ResourceType.STORAGE) metrics.extend(storage_metrics) LOG.info('The system(storage_id: %s) stops collecting storage' ' performance, the length is: %s', storage_id, len(storage_metrics)) if resource_metrics.get(constants.ResourceType.VOLUME): volume_metrics = self.get_volume_metrics( storage_id, resource_metrics.get(constants.ResourceType.VOLUME), start_time, end_time, constants.ResourceType.VOLUME) metrics.extend(volume_metrics) LOG.info('The system(storage_id: %s) stops collecting volume' ' performance, the length is: %s', storage_id, len(volume_metrics)) return metrics def get_storage_metrics(self, storage_id, resource_metrics, start_time, end_time, resource_type): metrics = [] arrays_id, arrays_name = self.get_array() packaging_data = self.get_packaging_storage_data( end_time, start_time, resource_type) if not arrays_id or not arrays_name or not packaging_data or\ end_time < start_time: return metrics for resource_key in resource_metrics.keys(): labels = { 'storage_id': storage_id, 'resource_type': resource_type, 'resource_id': arrays_id, 'resource_name': arrays_name, 'type': 'RAW', 'unit': resource_metrics[resource_key]['unit'] } resource_value = {} for about_timestamp in packaging_data.keys(): metrics_data = packaging_data.get(about_timestamp) resource_value[about_timestamp] = \ metrics_data.get(resource_key) metrics_res = constants.metric_struct( name=resource_key, labels=labels, values=resource_value) metrics.append(metrics_res) return metrics def get_packaging_storage_data(self, end_time, start_time, resource_type): duplicate = set() packaging_data = {} list_metrics = self.rest_handler.rest_call( self.rest_handler.REST_ARRAY_HISTORICAL_URL) for storage_metrics in (list_metrics or []): about_timestamp = self.checkout_data( storage_metrics, start_time, end_time, resource_type, duplicate) if about_timestamp is None: continue metrics_data = self.get_metrics_data( storage_metrics, about_timestamp) packaging_data[about_timestamp] = metrics_data return packaging_data def checkout_data(self, storage_metrics, start_time, end_time, resource_type, duplicate): opened =
storage_metrics.get('time') if opened is None: return None timestamp_s = self.get_timestamp_s(opened) timestamp_ms = timestamp_s * units.k if timestamp_ms < start_time or timestamp_ms >= end_time: return None about_timestamp = \ int(timestamp_s / consts.SIXTY) * consts.SIXTY * units.k duplicate_value = self.get_duplicate_value( about_timestamp, resource_type, storage_metrics) if duplicate_value in duplicate: return None duplicate.add(duplicate_value) return about_timestamp def get_volume_metrics(self, storage_id, resource_metrics, start_time, end_time, resource_type): metrics = [] packaging_data = self.get_packaging_volume_data( end_time, resource_type, start_time) if end_time < start_time or not packaging_data: return metrics for volume_name in packaging_data.keys(): for resource_key in resource_metrics.keys(): labels = { 'storage_id': storage_id, 'resource_type': resource_type, 'resource_id': volume_name, 'resource_name': volume_name, 'type': 'RAW', 'unit': resource_metrics[resource_key]['unit'] } resource_value = {} for volume_metrics in (packaging_data.get(volume_name) or []): resource_value[volume_metrics.get('time')] = \ volume_metrics.get(resource_key) metrics_res = constants.metric_struct( name=resource_key, labels=labels, values=resource_value) metrics.append(metrics_res) return metrics def get_packaging_volume_data(self, end_time, resource_type, start_time): duplicate = set() packaging_data = {} list_metrics = self.rest_handler.rest_call( self.rest_handler.REST_VOLUME_HISTORICAL_URL) for volume_metrics in (list_metrics or []): about_timestamp = self.checkout_data( volume_metrics, start_time, end_time, resource_type, duplicate) if about_timestamp is None: continue volume_metrics_data = self.get_metrics_data( volume_metrics, about_timestamp) volume_metrics_list = packaging_data.get( volume_metrics.get('name')) if not volume_metrics_list: volume_metrics_list = [] volume_metrics_list.append(volume_metrics_data) packaging_data[volume_metrics.get('name')] = volume_metrics_list return packaging_data def get_timestamp_s(self, opened): time_difference = self.get_time_difference() timestamp_s = int( datetime.datetime.strptime(opened, consts.PURE_TIME_FORMAT) .timestamp() + time_difference) return timestamp_s @staticmethod def get_duplicate_value(about_timestamp, resource_type, storage_metrics): duplicate_value = None if resource_type == constants.ResourceType.VOLUME: duplicate_value = '{}{}'.format( storage_metrics.get('name'), about_timestamp) if resource_type == constants.ResourceType.STORAGE: duplicate_value = about_timestamp return duplicate_value @staticmethod def get_metrics_data(metrics, about_timestamp): read_iop = metrics.get('reads_per_sec') write_iop = metrics.get('writes_per_sec') read_throughput = metrics.get('output_per_sec') / units.Mi write_throughput = metrics.get('input_per_sec') / units.Mi read_response_time = metrics.get('usec_per_read_op') / units.k write_response_time = metrics.get('usec_per_write_op') / units.k metrics_data = { 'iops': round(read_iop + write_iop, 3), "readIops": round(read_iop, 3), "writeIops": round(write_iop, 3), "throughput": round(read_throughput + write_throughput, 3), "readThroughput": round(read_throughput, 3), "writeThroughput": round(write_throughput, 3), "readResponseTime": round(read_response_time, 3), "writeResponseTime": round(write_response_time, 3), 'time': about_timestamp } return metrics_data def get_array(self): arrays_id = None arrays_name = None arrays = self.rest_handler.rest_call( self.rest_handler.REST_ARRAY_URL) if arrays: 
arrays_id = arrays.get('id') arrays_name = arrays.get('array_name') return arrays_id, arrays_name @staticmethod def get_capabilities(context, filters=None): return { 'is_historic': True, 'resource_metrics': { constants.ResourceType.STORAGE: consts.STORAGE_CAP, constants.ResourceType.VOLUME: consts.VOLUME_CAP } } def get_latest_perf_timestamp(self, context): list_metrics = self.rest_handler.rest_call( self.rest_handler.REST_ARRAY_HISTORICAL_URL) opened = list_metrics[consts.LIST_METRICS].get('time') timestamp_s = self.get_timestamp_s(opened) timestamp_ms = \ int(timestamp_s / consts.SIXTY) * consts.SIXTY * units.k return timestamp_ms def list_storage_host_initiators(self, context): list_initiators = [] initiators = self.rest_handler.rest_call( self.rest_handler.REST_HOST_URL) for initiator in (initiators or []): host_id = initiator.get('name') self.get_initiator(initiator, list_initiators, host_id, 'iqn', constants.InitiatorType.ISCSI) self.get_initiator(initiator, list_initiators, host_id, 'wwn', constants.InitiatorType.FC) self.get_initiator(initiator, list_initiators, host_id, 'nqn', constants.InitiatorType.NVME_OVER_FABRIC) return list_initiators def get_initiator(self, initiator, list_initiators, host_id, protocol, network): protocol_list = initiator.get(protocol) if protocol_list: for initiator_protocol in (protocol_list or []): if 'wwn' in protocol: initiator_protocol = self.get_splice_wwn( initiator_protocol) initiator_d = { 'native_storage_host_initiator_id': initiator_protocol, 'native_storage_host_id': host_id, 'name': initiator_protocol, 'type': network, 'status': constants.InitiatorStatus.UNKNOWN, 'wwn': initiator_protocol, 'storage_id': self.storage_id } list_initiators.append(initiator_d) def list_storage_hosts(self, ctx): host_list = [] hosts = self.rest_handler.rest_call( self.rest_handler.REST_HOST_PERSONALITY_URL) for host in (hosts or []): name = host.get('name') personality = host.get('personality').lower() \ if host.get('personality') else None h = { "name": name, "storage_id": self.storage_id, "native_storage_host_id": name, "os_type": consts.HOST_OS_TYPES_MAP.get( personality, constants.HostOSTypes.UNKNOWN), "status": constants.HostStatus.NORMAL } host_list.append(h) return host_list def list_storage_host_groups(self, context): host_groups = self.rest_handler.rest_call( self.rest_handler.REST_HGROUP_URL) host_group_list = [] storage_host_grp_relation_list = [] for hgroup in (host_groups or []): name = hgroup.get('name') hg = { 'native_storage_host_group_id': name, 'name': name, 'storage_id': self.storage_id } host_group_list.append(hg) for host in (hgroup.get('hosts') or []): host_relation = { 'native_storage_host_group_id': name, 'storage_id': self.storage_id, 'native_storage_host_id': host } storage_host_grp_relation_list.append(host_relation) result = { 'storage_host_groups': host_group_list, 'storage_host_grp_host_rels': storage_host_grp_relation_list } return result def list_volume_groups(self, context): volume_groups = self.rest_handler.rest_call( self.rest_handler.REST_VOLUME_GROUP_URL) vol_group_list = [] vol_grp_vol_relation_list = [] for volume_group in (volume_groups or []): name = volume_group.get('name') vol_g = { 'name': name, 'storage_id': self.storage_id, 'native_volume_group_id': name } vol_group_list.append(vol_g) for volume_id in (volume_group.get('volumes') or []): volume_group_relation = { 'storage_id': self.storage_id, 'native_volume_group_id': name, 'native_volume_id': volume_id } vol_grp_vol_relation_list.append(volume_group_relation) result = 
{ 'volume_groups': vol_group_list, 'vol_grp_vol_rels': vol_grp_vol_relation_list } return result def list_masking_views(self, context): list_masking_views = [] view_id_dict = {} hgroup_views = self.rest_handler.rest_call( self.rest_handler.REST_HGROUP_CONNECT_URL) for hgroup_view in (hgroup_views or []): hgroup_name = hgroup_view.get('name') native_volume_id = hgroup_view.get('vol') native_masking_view_id = '{}{}'.format( hgroup_name, native_volume_id) if view_id_dict.get(hgroup_name): continue view_id_dict[native_masking_view_id] = hgroup_name view = { 'native_masking_view_id': native_masking_view_id, 'name': native_masking_view_id, 'native_storage_host_group_id': hgroup_name, 'native_volume_id': native_volume_id, 'storage_id': self.storage_id } list_masking_views.append(view) masking_views = self.rest_handler.rest_call( self.rest_handler.REST_HOST_CONNECT_URL) for masking_view in (masking_views or []): hgroup = masking_view.get('hgroup') host_id = masking_view.get('name') native_volume_id = masking_view.get('vol') hgroup_name = '{}{}'.format(hgroup, native_volume_id) if view_id_dict.get(hgroup_name) is not None and \ view_id_dict.get(hgroup_name) in hgroup: continue native_masking_view_id = '{}{}{}'.format( host_id, hgroup, native_volume_id) if view_id_dict.get(native_masking_view_id): continue view_id_dict[native_masking_view_id] = native_masking_view_id view = { 'native_masking_view_id': native_masking_view_id, 'name': native_masking_view_id, 'native_storage_host_id': host_id, 'native_volume_id': native_volume_id, 'storage_id': self.storage_id } list_masking_views.append(view) return list_masking_views ================================================ FILE: delfin/drivers/pure/flasharray/rest_handler.py ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
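# A minimal, self-contained sketch of the handshake RestHandler.login() below
# implements: Pure's 1.17 API first exchanges credentials for an API token at
# auth/apitoken, then posts that token to auth/session so the HTTP session
# keeps a cookie for later calls. Plain requests is used here purely for
# illustration; the endpoint paths come from the class constants below, while
# the host and credentials are invented placeholders.
import requests

BASE = 'https://array.example.com'  # invented address

def pure_login(session, username, password):
    # Step 1: trade username/password for an API token.
    token_res = session.post(BASE + '/api/1.17/auth/apitoken',
                             json={'username': username,
                                   'password': password})
    token_res.raise_for_status()
    # Step 2: post the token payload back to open a cookie-backed session.
    session_res = session.post(BASE + '/api/1.17/auth/session',
                               json=token_res.json())
    session_res.raise_for_status()
    return session_res.json().get('username')

# Usage sketch (would perform real network I/O):
# session = requests.Session()
# pure_login(session, 'pureuser', 'secret')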
import six from oslo_log import log as logging from delfin import exception, cryptor from delfin.drivers.pure.flasharray import consts from delfin.drivers.utils.rest_client import RestClient LOG = logging.getLogger(__name__) class RestHandler(RestClient): REST_STORAGE_URL = '/api/1.17/array?space=true' REST_ARRAY_URL = '/api/1.17/array' REST_VOLUME_URL = '/api/1.17/volume?space=true&limit=500&token=' \ 'aWQgPSA5ODA1Mg==' REST_VOLUME_TOKEN_URL = '/api/1.17/volume?space=true&limit=20&token=' REST_PORT_URL = '/api/1.17/port' REST_NETWORK_URL = '/api/1.17/network' REST_DISK_URL = '/api/1.17/drive' REST_HARDWARE_URL = '/api/1.17/hardware' REST_CONTROLLERS_URL = '/api/1.17/array?controllers=true' REST_ALERTS_URL = '/api/1.17/message?flagged=true&open=true' REST_AUTH_URL = '/api/1.17/auth/apitoken' REST_SESSION_URL = '/api/1.17/auth/session' REST_HOST_URL = '/api/1.17/host' REST_HOST_PERSONALITY_URL = '/api/1.17/host?personality=true' REST_HOST_CONNECT_URL = '/api/1.17/host?connect=true' REST_HGROUP_CONNECT_URL = '/api/1.17/hgroup?connect=true' REST_HGROUP_URL = '/api/1.17/hgroup' REST_VOLUME_GROUP_URL = '/api/1.17/vgroup' REST_ARRAY_HISTORICAL_URL = '/api/1.17/array?action=monitor&historical=1h' REST_VOLUME_HISTORICAL_URL =\ '/api/1.17/volume?action=monitor&historical=1h' def __init__(self, **kwargs): super(RestHandler, self).__init__(**kwargs) def login(self): try: data = {'username': self.rest_username, 'password': cryptor.decode( self.rest_password)} self.init_http_head() token_res = self.do_call(RestHandler.REST_AUTH_URL, data, method='POST') if token_res.json().get('msg') == consts.LOGIN_PASSWORD_ERR: LOG.error("Login error, failed to obtain the token. " "status_code:%s, URL: %s", token_res.status_code, RestHandler.REST_AUTH_URL) raise exception.InvalidUsernameOrPassword( 'Failed to obtain the token') if token_res.status_code != consts.SUCCESS_STATUS_CODE or not \ token_res.json().get('api_token'): LOG.error("Login error, failed to obtain the token. " "status_code:%s, URL: %s", token_res.status_code, RestHandler.REST_AUTH_URL) raise exception.StorageBackendException( 'Failed to obtain the token') session_res = self.do_call(RestHandler.REST_SESSION_URL, token_res.json(), method='POST') if session_res.status_code != consts.SUCCESS_STATUS_CODE or not \ session_res.json().get('username'): LOG.error("Login error, failed to obtain the session. " "status_code:%s, URL: %s", session_res.status_code, RestHandler.REST_SESSION_URL) raise exception.StorageBackendException( 'Failed to obtain the session.') except Exception as e: LOG.error("Login error: %s", six.text_type(e)) raise e finally: data = None token_res = None def logout(self): res = self.do_call(RestHandler.REST_SESSION_URL, None, method='DELETE') if res.status_code != consts.SUCCESS_STATUS_CODE\ or not res.json().get('username'): LOG.error("Logout error, failed to delete the session token. " "status_code:%s, URL: %s", res.status_code, RestHandler.REST_SESSION_URL) raise exception.StorageBackendException(res.text) def rest_call(self, url, data=None, method='GET'): result_json = None res = self.do_call(url, data, method) if res.status_code == consts.SUCCESS_STATUS_CODE: result_json = res.json() elif res.status_code == consts.PERMISSION_DENIED_STATUS_CODE: self.login() the_second_time_res = self.do_call(url, data, method) if the_second_time_res.status_code == consts.SUCCESS_STATUS_CODE: result_json = the_second_time_res.json() return result_json def get_volumes(self, url=REST_VOLUME_URL, data=None, volume_list=None, count=consts.DEFAULT_COUNT_GET_VOLUMES_INFO): if volume_list is None: volume_list = [] res = self.do_call(url, data, 'GET') if res.status_code == consts.SUCCESS_STATUS_CODE: result_json = res.json() volume_list.extend(result_json) next_token = res.headers.get(consts.CUSTOM_TOKEN) if next_token: url = '%s%s' % (RestHandler.REST_VOLUME_TOKEN_URL, next_token) self.get_volumes(url, data, volume_list) elif res.status_code == consts.PERMISSION_DENIED_STATUS_CODE: self.login() if count < consts.RE_LOGIN_TIMES: count = count + consts.CONSTANT_ONE self.get_volumes(url, data, volume_list, count) return volume_list ================================================ FILE: delfin/drivers/utils/__init__.py ================================================ ================================================ FILE: delfin/drivers/utils/performance_file/__init__.py ================================================ ================================================ FILE: delfin/drivers/utils/performance_file/macro_san/__init__.py ================================================ ================================================ FILE: delfin/drivers/utils/performance_file/svc/__init__.py ================================================ ================================================ FILE: delfin/drivers/utils/performance_file/vnx_block/__init__.py ================================================ ================================================ FILE: delfin/drivers/utils/rest_client.py ================================================ # Copyright 2020 The SODA Authors. # Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import json import requests import six from oslo_log import log as logging from delfin import exception from delfin import ssl_utils from delfin.i18n import _ LOG = logging.getLogger(__name__) SOCKET_TIMEOUT = 10 class RestClient(object): def __init__(self, **kwargs): rest_access = kwargs.get('rest') if rest_access is None: raise exception.InvalidInput('Input rest_access is missing') self.rest_host = rest_access.get('host') self.rest_port = rest_access.get('port') self.rest_username = rest_access.get('username') self.rest_password = rest_access.get('password') self.san_address = 'https://%s:%s' % \ (self.rest_host, str(self.rest_port)) self.session = None self.device_id = None self.verify = kwargs.get('verify', False) self.rest_auth_token = None def init_http_head(self): if self.session: self.session.close() self.session = requests.Session() self.session.headers.update({ "Connection": "keep-alive", 'Accept': 'application/json', "Content-Type": "application/json"}) if not self.verify: self.session.verify = False else: LOG.debug("Enable certificate verification, ca_path: {0}".format( self.verify)) self.session.verify = self.verify self.session.trust_env = False self.session.mount("https://", ssl_utils.get_host_name_ignore_adapter()) def do_call(self, url, data, method, calltimeout=SOCKET_TIMEOUT): if 'http' not in url: if self.san_address: url = '%s%s' % (self.san_address, url) kwargs = {'timeout': calltimeout} if data: kwargs['data'] = json.dumps(data) if method in ('POST', 'PUT', 'GET', 'DELETE'): func = getattr(self.session, method.lower()) else: msg = _("Request method %s is invalid.") % method LOG.error(msg) raise exception.StorageBackendException(msg) res = None try: res = func(url, **kwargs) except requests.exceptions.ConnectTimeout as ct: LOG.error('Connect Timeout error for url([{}]{}): {}'.format( method, url, ct)) raise exception.InvalidIpOrPort() except requests.exceptions.ReadTimeout as rt: LOG.error('Read timed out error for url([{}]{}): {}'.format( method, url, rt)) raise exception.StorageBackendException(six.text_type(rt)) except requests.exceptions.SSLError as e: err_str = six.text_type(e) LOG.error('SSLError for url([{}]{}): {}'.format( method, url, err_str)) if 'certificate verify failed' in err_str: raise exception.SSLCertificateFailed() else: raise exception.SSLHandshakeFailed() except Exception as err: LOG.error('Bad response from server for url([{}]{}): {}'.format( method, url, err)) if 'WSAETIMEDOUT' in str(err): raise exception.ConnectTimeout() elif 'Failed to establish a new connection' in str(err): raise exception.InvalidIpOrPort() elif 'Read timed out' in str(err): raise exception.StorageBackendException(six.text_type(err)) else: raise exception.BadResponse() return res ================================================ FILE: delfin/drivers/utils/ssh_client.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2011 OpenStack LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
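# RestClient.do_call above funnels every transport failure into a delfin
# exception type (InvalidIpOrPort for unreachable hosts, SSLCertificateFailed
# or SSLHandshakeFailed for TLS problems, StorageBackendException for read
# timeouts, BadResponse otherwise), so driver code never handles raw requests
# errors. A hedged sketch of a caller leaning on that contract; probe_backend
# and the '/api/1.17/array' URL are invented for illustration.
from delfin import exception

def probe_backend(client):
    # 'client' is assumed to be an initialized RestClient from the file above.
    try:
        res = client.do_call('/api/1.17/array', None, 'GET')
        return res.status_code == 200
    except exception.InvalidIpOrPort:
        return False  # host unreachable or port closed
    except (exception.SSLCertificateFailed, exception.SSLHandshakeFailed):
        raise  # certificate trouble needs operator action
    except exception.StorageBackendException:
        return False  # read timeout or backend-side failure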
import time import paramiko import six from eventlet import pools from oslo_log import log as logging from paramiko.hostkeys import HostKeyEntry from delfin import cryptor from delfin import exception, utils LOG = logging.getLogger(__name__) class SSHClient(object): SOCKET_TIMEOUT = 10 def __init__(self, **kwargs): ssh_access = kwargs.get('ssh') if ssh_access is None: raise exception.InvalidInput('Input ssh_access is missing') self.ssh_host = ssh_access.get('host') self.ssh_port = ssh_access.get('port') self.ssh_username = ssh_access.get('username') self.ssh_password = ssh_access.get('password') self.ssh_pub_key_type = ssh_access.get('pub_key_type') self.ssh_pub_key = ssh_access.get('pub_key') self.ssh_conn_timeout = ssh_access.get('conn_timeout') if self.ssh_conn_timeout is None: self.ssh_conn_timeout = SSHClient.SOCKET_TIMEOUT def connect(self): self.ssh = paramiko.SSHClient() if self.ssh_pub_key is None: self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) else: host_key = '%s %s %s' % \ (self.ssh_host, self.ssh_pub_key_type, self.ssh_pub_key) self.set_host_key(host_key) self.ssh.connect(hostname=self.ssh_host, port=self.ssh_port, username=self.ssh_username, password=cryptor.decode(self.ssh_password), timeout=self.ssh_conn_timeout) def set_host_key(self, host_key): """ Set the public key. The host_key argument is passed in as a string rather than a file path, so it cannot be loaded from a file; instead it is parsed directly from the string. :param str host_key: the public key as a string """ if (len(host_key) == 0) or (host_key[0] == "#"): return try: e = HostKeyEntry.from_line(host_key) except exception.SSHException: return if e is not None: host_names = e.hostnames for h in host_names: if self.ssh._host_keys.check(h, e.key): e.hostnames.remove(h) if len(e.hostnames): self.ssh._host_keys._entries.append(e) def exec_command(self, command_str): result = None try: if command_str is not None: if self.ssh is not None: stdin, stdout, stderr = self.ssh.exec_command(command_str) res, err = stdout.read(), stderr.read() re = res if res else err result = re.decode() except Exception as e: LOG.error(e) result = e return result def close(self): try: if self.ssh is not None: # Close connection self.ssh.close() self.ssh = None except Exception as e: LOG.error(e) def do_exec(self, command_str): """Execute command""" re = None try: if command_str is not None: self.connect() re = self.exec_command(command_str) except paramiko.AuthenticationException as ae: LOG.error('doexec Authentication error:{}'.format(ae)) raise exception.InvalidUsernameOrPassword() except Exception as e: LOG.error('doexec error:{}'.format(e)) if 'WSAETIMEDOUT' in str(e): raise exception.SSHConnectTimeout() elif 'No authentication methods available' in str(e) \ or 'Authentication failed' in str(e): raise exception.InvalidUsernameOrPassword() elif 'not a valid RSA private key file' in str(e): raise exception.InvalidPrivateKey() elif 'not found in known_hosts' in str(e): raise exception.SSHNotFoundKnownHosts(self.ssh_host) else: raise exception.SSHException() finally: self.close() return re class SSHPool(pools.Pool): CONN_TIMEOUT = 60 def __init__(self, **kwargs): ssh_access = kwargs.get('ssh') if ssh_access is None: raise exception.InvalidInput('Input ssh_access is missing') self.ssh_host = ssh_access.get('host') self.ssh_port = ssh_access.get('port') self.ssh_username = ssh_access.get('username') self.ssh_password = ssh_access.get('password') self.ssh_pub_key_type = ssh_access.get('pub_key_type') self.ssh_pub_key = ssh_access.get('pub_key') self.ssh_conn_timeout = ssh_access.get('conn_timeout') if self.ssh_conn_timeout is None: self.ssh_conn_timeout = SSHPool.CONN_TIMEOUT super(SSHPool, self).__init__(min_size=0, max_size=3) def set_host_key(self, host_key, ssh): """ Set the public key. The host_key argument is passed in as a string rather than a file path, so it cannot be loaded from a file; instead it is parsed directly from the string. :param str host_key: the public key as a string """ if (len(host_key) == 0) or (host_key[0] == "#"): return try: e = HostKeyEntry.from_line(host_key) except exception.SSHException: return if e is not None: host_names = e.hostnames for h in host_names: if ssh._host_keys.check(h, e.key): e.hostnames.remove(h) if len(e.hostnames): ssh._host_keys._entries.append(e) def create(self): ssh = paramiko.SSHClient() try: if self.ssh_pub_key is None: ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) else: host_key = '%s %s %s' % \ (self.ssh_host, self.ssh_pub_key_type, self.ssh_pub_key) self.set_host_key(host_key, ssh) ssh.connect(hostname=self.ssh_host, port=self.ssh_port, username=self.ssh_username, password=cryptor.decode(self.ssh_password), timeout=self.ssh_conn_timeout) transport = ssh.get_transport() transport.set_keepalive(self.ssh_conn_timeout) return ssh except Exception as e: err = six.text_type(e) LOG.error(err) if 'timed out' in err: raise exception.InvalidIpOrPort() elif 'No authentication methods available' in err \ or 'Authentication failed' in err: raise exception.InvalidUsernameOrPassword() elif 'not a valid RSA private key file' in err: raise exception.InvalidPrivateKey() elif 'not found in known_hosts' in err: raise exception.SSHNotFoundKnownHosts(self.ssh_host) else: raise exception.SSHException(err) def get(self): """Return an item from the pool, when one is available. This may cause the calling greenthread to block. Check if a connection is active before returning it. For dead connections create and return a new connection.
""" if self.free_items: conn = self.free_items.popleft() if conn: if conn.get_transport().is_active(): return conn else: conn.close() self.current_size -= 1 if self.current_size < self.max_size: try: self.current_size += 1 created = self.create() except Exception as e: self.current_size -= 1 raise e return created return self.channel.get() def remove(self, ssh): """Close an ssh client and remove it from free_items.""" ssh.close() if ssh in self.free_items: self.free_items.remove(ssh) if self.current_size > 0: self.current_size -= 1 def put(self, conn): if self.current_size > self.max_size: conn.close() self.current_size -= 1 return super(SSHPool, self).put(conn) def do_exec(self, command_str): result = '' try: with self.item() as ssh: utils.check_ssh_injection(command_str) if command_str is not None and ssh is not None: stdin, stdout, stderr = ssh.exec_command(command_str) res, err = stdout.read(), stderr.read() re = res if res else err result = re.decode() except paramiko.AuthenticationException as ae: LOG.error('doexec Authentication error:{}'.format(ae)) raise exception.InvalidUsernameOrPassword() except Exception as e: err = six.text_type(e) LOG.error(err) if 'timed out' in err \ or 'SSH connect timeout' in err\ or 'Unable to connect to port' in err: raise exception.ConnectTimeout() elif 'No authentication methods available' in err \ or 'Authentication failed' in err \ or 'Invalid username or password' in err: raise exception.InvalidUsernameOrPassword() elif 'not a valid RSA private key file' in err \ or 'not a valid RSA private key' in err: raise exception.InvalidPrivateKey() else: raise exception.SSHException(err) if 'invalid command name' in result or 'login failed' in result or\ 'is not a recognized command' in result: raise exception.StorageBackendException(result) return result def do_exec_shell(self, command_list, sleep_time=0.5): result = '' try: with self.item() as ssh: if command_list and ssh: channel = ssh.invoke_shell() for command in command_list: utils.check_ssh_injection(command) channel.send(command + '\n') time.sleep(sleep_time) channel.send("exit" + "\n") channel.close() while True: resp = channel.recv(9999).decode('utf8') if not resp: break result += resp if 'is not a recognized command' in result \ or 'Unknown command' in result: raise exception.InvalidIpOrPort() except paramiko.AuthenticationException as ae: LOG.error('doexec Authentication error:{}'.format(ae)) raise exception.InvalidUsernameOrPassword() except Exception as e: err = six.text_type(e) LOG.error(err) if 'timed out' in err \ or 'SSH connect timeout' in err: raise exception.SSHConnectTimeout() elif 'No authentication methods available' in err \ or 'Authentication failed' in err \ or 'Invalid username or password' in err: raise exception.InvalidUsernameOrPassword() elif 'not a valid RSA private key file' in err \ or 'not a valid RSA private key' in err: raise exception.InvalidPrivateKey() elif 'Unable to connect to port' in err \ or 'Invalid ip or port' in err: raise exception.InvalidIpOrPort() else: raise exception.SSHException(err) return result ================================================ FILE: delfin/drivers/utils/tools.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import os import re import time import six try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET from scp import SCPClient from oslo_log import log as logging from oslo_utils import units LOG = logging.getLogger(__name__) class Tools(object): def time_str_to_timestamp(self, time_str, time_pattern): """ Time str to time stamp conversion """ time_stamp = '' if time_str: time_array = time.strptime(time_str, time_pattern) time_stamp = int(time.mktime(time_array) * units.k) return time_stamp def timestamp_to_time_str(self, time_stamp, time_pattern): """ Time stamp to time str conversion """ time_str = '' if time_stamp: time_stamp = time_stamp / units.k time_array = time.localtime(time_stamp) time_str = time.strftime(time_pattern, time_array) return time_str @staticmethod def timestamp_to_utc_time_str(time_stamp, time_pattern): """ Time stamp to time str conversion """ time_str = '' if time_stamp: time_stamp = time_stamp / units.k dateArray = datetime.datetime.utcfromtimestamp(time_stamp) time_str = dateArray.strftime(time_pattern) return time_str @staticmethod def change_capacity_to_bytes(unit): unit = unit.upper() if unit == 'TB': res = units.Ti elif unit == 'GB': res = units.Gi elif unit == 'MB': res = units.Mi elif unit == 'KB': res = units.Ki else: res = 1 return int(res) @staticmethod def get_capacity_size(value): capacity = 0 if value and value != '' and value != '-' and value != '0B': if value.isdigit(): capacity = float(value) else: unit = value[-2:] capacity = float(value[:-2]) * int( Tools.change_capacity_to_bytes(unit)) return capacity @staticmethod def split_value_map_list(value_info, map_list, is_mapping=False, is_alert=False, split=":"): detail_array = value_info.split('\r\n') value_map = {} temp_key = '' for detail in detail_array: if detail: string_info = detail.split(split + " ") key = string_info[0].replace(' ', '') value = '' if len(string_info) > 1 or is_mapping: for string in string_info[1:]: value = string.replace('""', '') value_map[key] = value if is_alert and key and len(string_info) > 1: temp_key = key continue if is_alert and temp_key and 'entries' not in detail: if len(string_info) > 1: value_map[temp_key] += string_info[1] elif len(string_info) == 1: value_map[temp_key] += string_info[0] else: if value_map != {}: map_list.append(value_map) value_map = {} if value_map != {}: map_list.append(value_map) return map_list @staticmethod def get_numbers_in_brackets(source_info, pattern_str): """Get the contents in brackets through regular expressions. source_info: Source data, example: "collect time (1583012100)" pattern_str: regular expression, example: "\\(\\d+\\)" """ object_info = '' object_infos = re.findall(pattern_str, source_info) if object_infos: object_info = object_infos[0].replace('(', '').replace(')', '') return object_info @staticmethod def remove_file_with_same_type(file_name, file_path): file_type = '%s_%s_%s' % (file_name.split('_')[0], file_name.split('_')[1], file_name.split('_')[2]) path_dir = os.listdir(file_path) for file in path_dir: if file_type in file: local_file = '%s%s' % (file_path, file) os.remove(local_file) @staticmethod def get_remote_file_to_xml(ssh, file, local_path, remote_path): root_node = None local_file = '%s%s' % (local_path, file) try: scp_client = SCPClient(ssh.get_transport(), socket_timeout=15.0) remote_file = '%s%s' % (remote_path, file) scp_client.get(remote_file, local_path) root_node = open(local_file).read() root_node = ET.fromstring(root_node) except Exception as e: err_msg = "Failed to copy statistics file: %s" % \ (six.text_type(e)) LOG.error(err_msg) finally: if os.path.exists(local_file): Tools.remove_file_with_same_type(file, local_path) return root_node ================================================ FILE: delfin/exception.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Delfin base exception handling. Includes decorator for re-raising Delfin-type exceptions. SHOULD include dedicated exception logging. """ import six import webob.exc from oslo_log import log from delfin.i18n import _ LOG = log.getLogger(__name__) class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, exception): self.code = exception.code self.title = '' self.explanation = exception.msg self.error_code = exception.error_code self.error_args = exception.error_args super(ConvertedException, self).__init__() class DelfinException(Exception): """Base Delfin Exception To correctly use this class, inherit from it and define a 'msg_fmt' property. That msg_fmt will get printf'd with the tuple arguments provided to the constructor.
""" msg_fmt = _("An unknown exception occurred.") code = 500 def __init__(self, *args, **kwargs): self.error_args = args message = kwargs.get('message') try: if not message: message = self.msg_fmt.format(*args) else: message = six.text_type(message) except Exception: LOG.error("Failed to format message: {0}".format(args)) message = self.msg_fmt self.msg = message super(DelfinException, self).__init__(message) @property def error_code(self): return self.__class__.__name__ class NotAuthorized(DelfinException): msg_fmt = _("Not authorized.") code = 403 class Invalid(DelfinException): msg_fmt = _("Unacceptable parameters.") code = 400 class BadRequest(Invalid): msg_fmt = _('The server could not comply with the request since\r\n' 'it is either malformed or otherwise incorrect.\r\n') code = 400 class MalformedRequestBody(Invalid): msg_fmt = _("Malformed request body: {0}.") class MalformedRequestUrl(Invalid): msg_fmt = _("Malformed request url.") class InvalidCredential(Invalid): msg_fmt = _("The credentials are invalid.") class InvalidResults(Invalid): msg_fmt = _("The results are invalid. {0}") class InvalidInput(Invalid): msg_fmt = _("Invalid input received. {0}") class InvalidName(Invalid): msg_fmt = _("An invalid 'name' value was provided. {0}") class InvalidContentType(Invalid): msg_fmt = _("Invalid content type: {0}.") class StorageSerialNumberMismatch(Invalid): msg_fmt = _("Storage serial number mismatch. {0}") class StorageAlreadyExists(Invalid): msg_fmt = _("Storage already exists.") class InvalidSNMPConfig(Invalid): msg_fmt = _("Invalid SNMP configuration: {0}") class NotFound(DelfinException): msg_fmt = _("Resource could not be found.") code = 404 class NoSuchAction(NotFound): msg_fmt = _("There is no such action: {0}") class AccessInfoNotFound(NotFound): msg_fmt = _("Access information for storage {0} could not be found.") class AlertSourceNotFound(NotFound): msg_fmt = _("Alert source for storage {0} could not be found.") class AlertSourceNotFoundWithHost(NotFound): msg_fmt = _("Alert source could not be found with host {0}.") class SNMPConnectionFailed(BadRequest): msg_fmt = _("Connection to SNMP server failed: {0}") class StorageNotFound(NotFound): msg_fmt = _("Storage {0} could not be found.") class StorageBackendNotFound(NotFound): msg_fmt = _("Storage backend could not be found.") class StoragePoolNotFound(NotFound): msg_fmt = _("Storage pool {0} could not be found.") class VolumeNotFound(NotFound): msg_fmt = _("Volume {0} could not be found.") class StorageHostInitiatorNotFound(NotFound): msg_fmt = _("Storage host initiator {0} could not be found.") class StorageHostNotFound(NotFound): msg_fmt = _("Storage host {0} could not be found.") class StorageHostGroupNotFound(NotFound): msg_fmt = _("Storage host group {0} could not be found.") class PortGroupNotFound(NotFound): msg_fmt = _("Port group {0} could not be found.") class VolumeGroupNotFound(NotFound): msg_fmt = _("Volume group {0} could not be found.") class MaskingViewNotFound(NotFound): msg_fmt = _("Masking View {0} could not be found.") class StorageHostGrpHostRelNotFound(NotFound): msg_fmt = _("Storage Host Group Host Relation {0} could not be found.") class PortGrpPortRelNotFound(NotFound): msg_fmt = _("Port Group Port Relation {0} could not be found.") class VolGrpVolRelationNotFound(NotFound): msg_fmt = _("Volume Group Volume Relation {0} could not be found.") class ControllerNotFound(NotFound): msg_fmt = _("Controller {0} could not be found.") class ControllerListNotFound(NotFound): msg_fmt = _("Controller List for 
{0} could not be found.")


class PortNotFound(NotFound):
    msg_fmt = _("Port {0} could not be found.")


class PortListNotFound(NotFound):
    msg_fmt = _("Port List for {0} could not be found.")


class DiskNotFound(NotFound):
    msg_fmt = _("Disk {0} could not be found.")


class FilesystemNotFound(NotFound):
    msg_fmt = _("Filesystem {0} could not be found.")


class QtreeNotFound(NotFound):
    msg_fmt = _("Qtree {0} could not be found.")


class QuotaNotFound(NotFound):
    msg_fmt = _("Quota {0} could not be found.")


class ShareNotFound(NotFound):
    msg_fmt = _("Share {0} could not be found.")


class StorageDriverNotFound(NotFound):
    msg_fmt = _("Storage driver '{0}' could not be found.")


class TaskNotFound(NotFound):
    msg_fmt = _("Task {0} could not be found.")


class FailedTaskNotFound(NotFound):
    msg_fmt = _("Failed task {0} could not be found.")


class ConfigNotFound(NotFound):
    msg_fmt = _("Could not find config at {0}.")


class PasteAppNotFound(NotFound):
    msg_fmt = _("Could not load paste app '{0}' from {1}.")


class StorageBackendException(DelfinException):
    msg_fmt = _("Exception from Storage Backend: {0}.")


class SSHException(DelfinException):
    msg_fmt = _("Exception in SSH protocol negotiation or logic. {0}")


class SSHInjectionThreat(DelfinException):
    msg_fmt = _("SSH command injection detected: {0}.")


# Tooz locking
class LockCreationFailed(DelfinException):
    msg_fmt = _('Unable to create lock. Coordination backend not started.')


class LockAcquisitionFailed(DelfinException):
    msg_fmt = _('Lock acquisition failed.')


class DuplicateExtension(DelfinException):
    msg_fmt = _('Found duplicate extension: {0}.')


class ImproperIPVersion(DelfinException):
    msg_fmt = _("Provided improper IP version {0}.")


class ConnectTimeout(DelfinException):
    msg_fmt = _("Connect timeout.")
    code = 500


class InvalidUsernameOrPassword(DelfinException):
    msg_fmt = _("Invalid username or password.")
    code = 400


class BadResponse(Invalid):
    msg_fmt = _('Bad response from server.')
    code = 500


class InvalidPrivateKey(DelfinException):
    msg_fmt = _("Not a valid RSA private key.")
    code = 400


class SSHConnectTimeout(DelfinException):
    msg_fmt = _("SSH connect timeout.")
    code = 500


class SSHNotFoundKnownHosts(NotFound):
    msg_fmt = _("{0} not found in known_hosts.")
    code = 400


class StorageClearAlertFailed(DelfinException):
    msg_fmt = _("Failed to clear alert. Reason: {0}.")


class StorageListAlertFailed(DelfinException):
    msg_fmt = _("Failed to list alerts. Reason: {0}.")


class HTTPConnectionTimeout(DelfinException):
    msg_fmt = _("HTTP connection timeout: {0}.")


class InvalidCAPath(DelfinException):
    msg_fmt = _("Invalid CA path: {0}.")


class StoragePerformanceCollectionFailed(DelfinException):
    msg_fmt = _("Failed to collect performance metrics. Reason: {0}.")


class SSLCertificateFailed(Invalid):
    msg_fmt = _("SSL certificate failed.")
    code = 400


class SSLHandshakeFailed(Invalid):
    msg_fmt = _("SSL handshake failure.")


class StorageIsSyncing(Invalid):
    msg_fmt = _("Storage {0} is syncing now, please try again later.")


class InvalidIpOrPort(DelfinException):
    msg_fmt = _("Invalid IP or port.")
    code = 400


class InvalidStorageCapability(Invalid):
    msg_fmt = _("Invalid capability response: {0}")
    code = 500


class StorageCapabilityNotSupported(Invalid):
    msg_fmt = _("Capability feature not supported by storage")
    code = 501


class EmptyResourceMetrics(DelfinException):
    msg_fmt = _("Empty resource metric in capabilities")
    code = 501


class TelemetryTaskExecError(DelfinException):
    msg_fmt = _("Failure in telemetry task execution")


class ComponentNotFound(NotFound):
    msg_fmt = _("Component {0} could not be found.")


class IncompleteTrapInformation(DelfinException):
    msg_fmt = _("Incomplete trap information. "
                "Storage {0} alert information needs to be synchronized.")


class StorageMaxUserCountException(DelfinException):
    msg_fmt = _(
        "Exception from storage: the number of users has reached "
        "the upper limit: {0}.")


================================================
FILE: delfin/exporter/__init__.py
================================================


================================================
FILE: delfin/exporter/base_exporter.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg
from oslo_log import log
import six
from stevedore import extension

from delfin import exception
from delfin.i18n import _

LOG = log.getLogger(__name__)

exporter_opts = [
    cfg.ListOpt('alert_exporters',
                default=['AlertExporterExample'],
                help="Exporters used for alert push."),
    cfg.ListOpt('performance_exporters',
                default=['PerformanceExporterExample'],
                help="Exporters used for performance push."),
]

CONF = cfg.CONF
CONF.register_opts(exporter_opts)


class BaseExporter(object):
    """Base class for data exporters."""

    def dispatch(self, ctxt, data):
        """Dispatch data to third-party platforms.

        :param ctxt: delfin.RequestContext
        :param data: The data to be pushed, a list of dicts.
        :type data: list
        """
        raise NotImplementedError()


class BaseManager(BaseExporter):
    def __init__(self, namespace):
        self.extension_manager = extension.ExtensionManager(namespace)
        self.exporters = self._get_exporters()

    def dispatch(self, ctxt, data):
        if not isinstance(data, (list, tuple)):
            data = [data]

        for exporter in self.exporters:
            try:
                exporter.dispatch(ctxt, data)
            except exception.DelfinException as e:
                err_msg = _("Failed to export data (%s).") % e.msg
                LOG.exception(err_msg)
            except Exception as e:
                err_msg = six.text_type(e)
                LOG.exception(err_msg)

    def _get_exporters(self):
        """Get exporters from the configuration file which shall be
        supported in entry points.
""" supported_exporters = self._get_supported_exporters() configured_exporters = self._get_configured_exporters() return [cls() for cls in supported_exporters if cls.__name__ in configured_exporters] def _get_supported_exporters(self): """Get all supported exporters from entry points file.""" return [ext.plugin for ext in self.extension_manager] def _get_configured_exporters(self): """Get exporters from configuration file.""" raise NotImplementedError() class AlertExporterManager(BaseManager): NAMESPACE = 'delfin.alert.exporters' def __init__(self): super(AlertExporterManager, self).__init__(self.NAMESPACE) def _get_configured_exporters(self): return CONF.alert_exporters class PerformanceExporterManager(BaseManager): NAMESPACE = 'delfin.performance.exporters' def __init__(self): super(PerformanceExporterManager, self).__init__(self.NAMESPACE) def _get_configured_exporters(self): return CONF.performance_exporters ================================================ FILE: delfin/exporter/example.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from delfin.exporter import base_exporter LOG = log.getLogger(__name__) class AlertExporterExample(base_exporter.BaseExporter): def dispatch(self, ctxt, data): LOG.debug("AlertExporterExample, report data: %s" % data) class PerformanceExporterExample(base_exporter.BaseExporter): def dispatch(self, ctxt, data): LOG.debug("PerformanceExporterExample, report data: %s" % data) ================================================ FILE: delfin/exporter/kafka/__init__.py ================================================ ================================================ FILE: delfin/exporter/kafka/exporter.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin.exporter import base_exporter from delfin.exporter.kafka import kafka class AlertExporterKafka(base_exporter.BaseExporter): def dispatch(self, ctxt, data): pass class PerformanceExporterKafka(base_exporter.BaseExporter): def dispatch(self, ctxt, data): kafka_obj = kafka.KafkaExporter() kafka_obj.push_to_kafka(data) ================================================ FILE: delfin/exporter/kafka/kafka.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json

from oslo_config import cfg
from oslo_log import log
from kafka import KafkaProducer

"""
The metrics received from the driver should be in this format:

storage_metrics = [Metric(name='response_time',
                          labels={'storage_id': '1',
                                  'resource_type': 'array'},
                          values={16009988175: 74.10422968341392,
                                  16009988180: 74.10422968341392}),
                   Metric(name='throughput',
                          labels={'storage_id': '1',
                                  'resource_type': 'array'},
                          values={16009988188: 68.57886608255163,
                                  16009988190: 68.57886608255163}),
                   Metric(name='read_throughput',
                          labels={'storage_id': '1',
                                  'resource_type': 'array'},
                          values={1600998817585: 76.60140757331934}),
                   Metric(name='write_throughput',
                          labels={'storage_id': '1',
                                  'resource_type': 'array'},
                          values={1600998817585: 20.264160223426305})]

# The metrics and the units we support:
unit_of_metric = {'response_time': 'ms', 'throughput': 'IOPS',
                  'read_throughput': 'IOPS', 'write_throughput': 'IOPS',
                  'bandwidth': 'MBps', 'read_bandwidth': 'MBps',
                  'write_bandwidth': 'MBps'
                  }
"""

LOG = log.getLogger(__name__)
CONF = cfg.CONF

kafka_opts = [
    cfg.StrOpt('kafka_topic_name', default='delfin-kafka',
               help='The topic of kafka'),
    cfg.StrOpt('kafka_ip', default='localhost',
               help='The kafka server IP'),
    cfg.StrOpt('kafka_port', default='9092',
               help='The kafka server port'),
]

CONF.register_opts(kafka_opts, "KAFKA_EXPORTER")
kafka = CONF.KAFKA_EXPORTER


class KafkaExporter(object):
    def push_to_kafka(self, data):
        topic = kafka.kafka_topic_name
        ip = kafka.kafka_ip
        port = kafka.kafka_port
        bootstrap_server = ip + ':' + port
        producer = KafkaProducer(
            bootstrap_servers=[bootstrap_server],
            value_serializer=lambda v: json.dumps(v).encode('utf-8'))
        producer.send(topic, value=data)


================================================
FILE: delfin/exporter/prometheus/__init__.py
================================================


================================================
FILE: delfin/exporter/prometheus/alert_manager.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import requests

from oslo_config import cfg
from oslo_log import log

LOG = log.getLogger(__name__)
CONF = cfg.CONF

alert_mngr_opts = [
    cfg.StrOpt('alert_manager_host', default='localhost',
               help='The prometheus alert manager host'),
    cfg.StrOpt('alert_manager_port', default='9093',
               help='The prometheus alert manager port'),
]

CONF.register_opts(alert_mngr_opts, "PROMETHEUS_ALERT_MANAGER_EXPORTER")
alert_cfg = CONF.PROMETHEUS_ALERT_MANAGER_EXPORTER


class PrometheusAlertExporter(object):
    model_key = ['alert_id', 'alert_name', 'sequence_number', 'category',
                 'severity', 'type', 'location', 'recovery_advice',
                 'storage_id', 'storage_name', 'vendor', 'model',
                 'serial_number', 'occur_time']

    def push_prometheus_alert(self, alerts):
        host = alert_cfg.alert_manager_host
        port = alert_cfg.alert_manager_port
        # Build the payload locally rather than accumulating it on a class
        # attribute, so previously pushed alerts are not re-sent on
        # subsequent calls.
        payload = []
        for alert in alerts:
            entry = {"labels": {}, "annotations": {}}
            for key in self.model_key:
                entry["labels"][key] = str(alert.get(key))
            entry["annotations"]["summary"] = alert.get("description")
            payload.append(entry)
            try:
                response = requests.post('http://' + host + ":" + port
                                         + '/api/v1/alerts', json=payload)
                if response.status_code != 200:
                    LOG.error("POST request failed for alert %s ",
                              alert.get('alert_id'))
            except Exception:
                LOG.error("Failed to export alert %s to the alert manager.",
                          alert.get('alert_id'))


================================================
FILE: delfin/exporter/prometheus/exporter.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_log import log

from delfin.exporter import base_exporter
from delfin.exporter.prometheus import prometheus, alert_manager

LOG = log.getLogger(__name__)


class AlertExporterPrometheus(base_exporter.BaseExporter):
    def dispatch(self, ctxt, data):
        alert_manager_obj = alert_manager.PrometheusAlertExporter()
        alert_manager_obj.push_prometheus_alert(data)


class PerformanceExporterPrometheus(base_exporter.BaseExporter):
    def dispatch(self, ctxt, data):
        prometheus_obj = prometheus.PrometheusExporter()
        prometheus_obj.push_to_prometheus(data)


================================================
FILE: delfin/exporter/prometheus/exporter_server.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
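# PerformanceExporterKafka above JSON-encodes whatever payload it is handed
# and publishes it to CONF.KAFKA_EXPORTER.kafka_topic_name. A minimal sketch
# of the same call path with an inline, JSON-serializable payload (the values
# below are illustrative):
def _sketch_push_metrics_to_kafka():
    from delfin.exporter.kafka import kafka as kafka_exporter

    payload = [{'name': 'response_time',
                'labels': {'storage_id': '1', 'resource_type': 'array'},
                'values': {'1600998817585': 74.1}}]
    # Connects to CONF.KAFKA_EXPORTER.kafka_ip:kafka_port
    # (localhost:9092 by default); KafkaProducer.send() is asynchronous.
    kafka_exporter.KafkaExporter().push_to_kafka(payload)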
import glob import os import six from flask import Flask from oslo_config import cfg import sys from oslo_log import log LOG = log.getLogger(__name__) app = Flask(__name__) grp = cfg.OptGroup('PROMETHEUS_EXPORTER') METRICS_CACHE_DIR = '/var/lib/delfin/metrics' prometheus_opts = [ cfg.StrOpt('metric_server_ip', default='0.0.0.0', help='The exporter server host ip'), cfg.IntOpt('metric_server_port', default=8195, help='The exporter server port'), cfg.StrOpt('metrics_dir', default=METRICS_CACHE_DIR, help='The temp directory to keep incoming metrics'), ] cfg.CONF.register_opts(prometheus_opts, group=grp) cfg.CONF(sys.argv[1:]) @app.route("/metrics", methods=['GET']) def getfile(): """Read the earliest metric file from the available *.prom files """ try: if not os.path.exists(cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir): LOG.error('No metrics cache folder exists') return '' os.chdir(cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir) except OSError as e: LOG.error('Error opening metrics folder') raise Exception(e) try: files = glob.glob("*.prom") data = '' if files: files.sort(key=os.path.getmtime) # Read only earliest file in one scrape to /metrics file_name = os.path.join(cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir, files[0]) with open(file_name, "r") as f: data = f.read() # Remove a metric file after reading it LOG.info('Metric file %s has been read', file_name) os.remove(file_name) LOG.info('Metric file %s has been deleted', file_name) except Exception as e: msg = six.text_type(e) LOG.error('Error while reading metrics %s', msg) return '' return data if __name__ == '__main__': app.run(host=cfg.CONF.PROMETHEUS_EXPORTER.metric_server_ip, port=cfg.CONF.PROMETHEUS_EXPORTER.metric_server_port) ================================================ FILE: delfin/exporter/prometheus/prometheus.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
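# The Flask endpoint above serves the oldest cached *.prom file per scrape
# and deletes it after reading, so repeated GETs drain the cache oldest-first.
# A minimal client-side sketch against the default port (8195, per the option
# defaults above; adjust host/port to the deployment):
def _sketch_scrape_metrics_once():
    import requests

    resp = requests.get('http://127.0.0.1:8195/metrics', timeout=10)
    resp.raise_for_status()
    # The body is Prometheus text exposition format, or empty when no
    # metric file is currently cached.
    return resp.text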
import datetime
import glob
import os

import six
from oslo_config import cfg
from oslo_log import log

LOG = log.getLogger(__name__)

grp = cfg.OptGroup('PROMETHEUS_EXPORTER')

METRICS_CACHE_DIR = '/var/lib/delfin/metrics'

# Metrics file retention time, in seconds
RETENTION_TIME_SEC = 3600

prometheus_opts = [
    cfg.StrOpt('metrics_dir', default=METRICS_CACHE_DIR,
               help='The temp directory to keep incoming metrics'),
    cfg.StrOpt('timezone', default='local',
               help='Time zone of the prometheus server'),
]

cfg.CONF.register_opts(prometheus_opts, group=grp)

"""
The metrics received from the driver should be in this format:

storage_metrics = [Metric(name='response_time',
                          labels={'storage_id': '1',
                                  'resource_type': 'array'},
                          values={16009988175: 74.10422968341392,
                                  16009988180: 74.10422968341392}),
                   Metric(name='throughput',
                          labels={'storage_id': '1',
                                  'resource_type': 'array'},
                          values={16009988188: 68.57886608255163,
                                  16009988190: 68.57886608255163}),
                   Metric(name='read_throughput',
                          labels={'storage_id': '1',
                                  'resource_type': 'array'},
                          values={1600998817585: 76.60140757331934}),
                   Metric(name='write_throughput',
                          labels={'storage_id': '1',
                                  'resource_type': 'array'},
                          values={1600998817585: 20.264160223426305})]
"""


class PrometheusExporter(object):

    def __init__(self):
        self.metrics_dir = cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir

    def check_metrics_dir_exists(self, directory):
        try:
            if not os.path.exists(directory):
                os.makedirs(directory)
            return True
        except Exception as e:
            msg = six.text_type(e)
            LOG.error("Error while creating metrics directory. Reason: %s",
                      msg)
            return False

    # Write metrics in the Prometheus exposition format.
    def _write_to_prometheus_format(self, f, metric, labels, prom_labels,
                                    values):
        f.write("# HELP %s metric for resource %s and instance %s\n"
                % (metric, labels.get('resource_type'),
                   labels.get('resource_id')))
        f.write("# TYPE %s gauge\n" % metric)

        for timestamp, value in values.items():
            f.write("%s{%s} %f %d\n" % (metric, prom_labels,
                                        value, timestamp))

    def get_file_age(self, path):
        # Return the ctime of the file/folder, in seconds since the epoch.
        return os.stat(path).st_ctime

    def clean_old_metric_files(self, metrics_dir):
        os.chdir(metrics_dir)
        files = glob.glob("*.prom")
        for file in files:
            file_age = datetime.datetime.now().timestamp() \
                - self.get_file_age(file)
            if file_age >= RETENTION_TIME_SEC:
                LOG.info("Removing metric file %s"
                         " as it crossed the retention period", file)
                os.remove(file)

    def push_to_prometheus(self, storage_metrics):
        if not self.check_metrics_dir_exists(self.metrics_dir):
            return
        try:
            self.clean_old_metric_files(self.metrics_dir)
        except Exception:
            LOG.error('Error while cleaning old metrics files')
        time_stamp = str(datetime.datetime.now().timestamp())
        temp_file_name = os.path.join(self.metrics_dir,
                                      time_stamp + ".prom.temp")
        actual_file_name = os.path.join(self.metrics_dir,
                                        time_stamp + ".prom")
        # Make a temp file with the current timestamp
        with open(temp_file_name, "w") as f:
            for metric in storage_metrics:
                name = metric.name
                labels = metric.labels
                values = metric.values
                storage_id = labels.get('storage_id')
                storage_name = labels.get('name')
                storage_sn = labels.get('serial_number')
                resource_type = labels.get('resource_type')
                resource_id = labels.get('resource_id')
                unit = labels.get('unit')
                m_type = labels.get('type', 'RAW')
                value_type = labels.get('value_type', 'gauge')
                prom_labels = (
                    "storage_id=\"%s\","
                    "storage_name=\"%s\","
                    "storage_sn=\"%s\","
                    "resource_type=\"%s\","
                    "resource_id=\"%s\","
                    "type=\"%s\","
                    "unit=\"%s\","
                    "value_type=\"%s\""
                    % (storage_id, storage_name, storage_sn,
                       resource_type, resource_id, m_type, unit,
                       value_type))
                name = labels.get('resource_type') + '_' + name
                self._write_to_prometheus_format(f, name, labels,
                                                 prom_labels, values)
        # Renaming only after the write completes ensures the exporter
        # server never sees an incomplete file.
        try:
            f.close()
            os.renames(temp_file_name, actual_file_name)
            LOG.info('A new metric file %s has been generated',
                     actual_file_name)
        except Exception:
            LOG.error('Error while renaming the temporary metric file')


================================================
FILE: delfin/i18n.py
================================================
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""oslo.i18n integration module.

See https://docs.openstack.org/oslo.i18n/latest/user/usage.html .
"""

import oslo_i18n

DOMAIN = 'delfin'

_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)

# The primary translation function using the well-known name "_"
_ = _translators.primary


def translate(value, user_locale):
    return oslo_i18n.translate(value, user_locale)


def get_available_languages():
    return oslo_i18n.get_available_languages(DOMAIN)


================================================
FILE: delfin/leader_election/__init__.py
================================================


================================================
FILE: delfin/leader_election/distributor/__init__.py
================================================


================================================
FILE: delfin/leader_election/distributor/perf_job_manager.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_log import log

from delfin import manager
from delfin.leader_election.distributor import task_distributor

LOG = log.getLogger(__name__)


class PerfJobManager(manager.Manager):
    """Generate jobs for the job distributor."""

    RPC_API_VERSION = '1.0'

    def __init__(self, service_name=None, *args, **kwargs):
        super(PerfJobManager, self).__init__(*args, **kwargs)

    def add_new_job(self, context, task_id):
        distributor = task_distributor.TaskDistributor(context)
        distributor.distribute_new_job(task_id)


================================================
FILE: delfin/leader_election/distributor/task_distributor.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
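# For the sample 'response_time' metric shown in the PrometheusExporter
# docstring, push_to_prometheus() emits a *.prom file roughly like:
#
#   # HELP array_response_time metric for resource array and instance None
#   # TYPE array_response_time gauge
#   array_response_time{storage_id="1",...} 74.104230 16009988175
#
# A minimal end-to-end sketch with a stand-in Metric tuple (delfin's real
# Metric type lives elsewhere in the tree; this namedtuple only mirrors the
# name/labels/values shape the exporter reads):
def _sketch_push_one_metric():
    import collections

    from delfin.exporter.prometheus import prometheus as prom

    Metric = collections.namedtuple('Metric', ['name', 'labels', 'values'])
    metric = Metric(name='response_time',
                    labels={'storage_id': '1', 'resource_type': 'array'},
                    values={16009988175: 74.10422968341392})
    prom.PrometheusExporter().push_to_prometheus([metric])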
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import six from oslo_config import cfg from oslo_log import log from delfin import db from delfin.coordination import ConsistentHashing from delfin.task_manager import metrics_rpcapi as task_rpcapi CONF = cfg.CONF LOG = log.getLogger(__name__) class TaskDistributor(object): def __init__(self, ctx): self.ctx = ctx self.task_rpcapi = task_rpcapi.TaskAPI() def distribute_new_job(self, task_id): partitioner = ConsistentHashing() partitioner.start() executor = partitioner.get_task_executor(task_id) try: db.task_update(self.ctx, task_id, {'executor': executor}) LOG.info('Distribute a new job, id: %s' % task_id) self.task_rpcapi.assign_job(self.ctx, task_id, executor) except Exception as e: LOG.error('Failed to distribute the new job, reason: %s', six.text_type(e)) raise e def distribute_failed_job(self, failed_task_id, executor): try: db.failed_task_update(self.ctx, failed_task_id, {'executor': executor}) LOG.info('Distribute a failed job, id: %s' % failed_task_id) self.task_rpcapi.assign_failed_job(self.ctx, failed_task_id, executor) except Exception as e: LOG.error('Failed to distribute failed job, reason: %s', six.text_type(e)) raise e ================================================ FILE: delfin/leader_election/factory.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin.leader_election.tooz.callback import ToozLeaderElectionCallback from delfin.leader_election.tooz.leader_elector import Elector from delfin.task_manager.scheduler.schedule_manager import SchedulerManager LEADER_ELECTION_KEY = "delfin-performance-metric-collection" class LeaderElectionFactory: @staticmethod def construct_elector(plugin, leader_key=None): """ Construct leader election elector based on specified plugin :param string plugin: required plugin for leader election """ # Maintain a unique key for metric collection leader election leader_election_key = LEADER_ELECTION_KEY if leader_key: leader_election_key = leader_key scheduler_mgr = SchedulerManager() if plugin == "tooz": scheduler_mgr.start() # Create callback object callback = ToozLeaderElectionCallback.register( on_leading_callback=scheduler_mgr.schedule_boot_jobs, on_stop_callback=scheduler_mgr.stop) return Elector(callback, leader_election_key) else: raise ValueError(plugin) ================================================ FILE: delfin/leader_election/interface.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
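# How the pieces above fit together: a new collection task is handed to
# TaskDistributor, which pins it to an executor via consistent hashing and
# dispatches it over RPC, while LeaderElectionFactory builds the elector that
# decides which node runs the scheduler. A minimal sketch (the task id below
# is illustrative):
def _sketch_distribute_new_job(ctx):
    from delfin.leader_election.distributor import task_distributor

    distributor = task_distributor.TaskDistributor(ctx)
    distributor.distribute_new_job('task-id-123')


def _sketch_start_election():
    from delfin.leader_election.factory import LeaderElectionFactory

    elector = LeaderElectionFactory.construct_elector("tooz")
    elector.run()  # blocks while participating in the election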
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Leader election interface definition."""

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class LeaderCallback:
    def __init__(self):
        self.on_started_leading_callback = None
        """on_started_leading is called when elected as leader"""

        self.on_stopped_leading_callback = None
        """on_stopped_leading is called when the leader gives up its
        leadership"""

    @abc.abstractmethod
    def on_started_leading(self, *args, **kwargs):
        pass

    @abc.abstractmethod
    def on_stopped_leading(self, *args, **kwargs):
        pass

    @classmethod
    def register(cls, on_leading_callback, on_stop_callback):
        callback = cls()
        callback.on_started_leading_callback = on_leading_callback
        callback.on_stopped_leading_callback = on_stop_callback
        return callback


@six.add_metaclass(abc.ABCMeta)
class LeaderElector:
    def __init__(self, callbacks, election_key):
        self.callbacks = callbacks
        self.election_key = election_key

    @abc.abstractmethod
    def run(self):
        """Kick-start the leader election.

        Invoke callback.on_started_leading once elected as leader.
        Invoke callback.on_stopped_leading once leadership is lost.

        run returns once the leader loses its leadership.
        """
        pass

    @abc.abstractmethod
    def cleanup(self):
        """Clean up leader election residue."""
        pass


================================================
FILE: delfin/leader_election/tooz/__init__.py
================================================


================================================
FILE: delfin/leader_election/tooz/callback.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from delfin.leader_election.interface import LeaderCallback


class ToozLeaderElectionCallback(LeaderCallback):
    def on_started_leading(self, *args, **kwargs):
        return self.on_started_leading_callback()

    def on_stopped_leading(self, *args, **kwargs):
        return self.on_stopped_leading_callback()


================================================
FILE: delfin/leader_election/tooz/leader_elector.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
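# The Elector defined just below pairs with a LeaderCallback subclass such as
# ToozLeaderElectionCallback above. A minimal wiring sketch (the election key
# and the print callbacks are illustrative):
def _sketch_run_leader_election():
    from delfin.leader_election.tooz.callback import (
        ToozLeaderElectionCallback)
    from delfin.leader_election.tooz.leader_elector import Elector

    callback = ToozLeaderElectionCallback.register(
        on_leading_callback=lambda: print("became leader"),
        on_stop_callback=lambda: print("lost leadership"))
    Elector(callback, "sketch-election-key").run()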
"""Leader elector is leased based leader election""" import threading from oslo_log import log from oslo_utils import timeutils from delfin.coordination import LeaderElectionCoordinator from delfin.leader_election.interface import LeaderElector LOG = log.getLogger(__name__) class Elector(LeaderElector): def __init__(self, callbacks, leader_election_key): key = leader_election_key.encode('ascii') super(Elector, self).__init__(callbacks, key) self._coordinator = None self.leader = False self._stop = threading.Event() self._runner = None def run(self): if self._coordinator: return self._stop.clear() self._coordinator = LeaderElectionCoordinator() self._coordinator.start() self._coordinator.ensure_group(self.election_key) self._coordinator.join_group() self._coordinator. \ register_on_start_leading_callback(self. callbacks.on_started_leading) # Register internal callback to notify being a leader self._coordinator. \ register_on_start_leading_callback(self.set_leader_callback) while not self._stop.is_set(): with timeutils.StopWatch() as w: LOG.debug("sending heartbeats for leader election") wait_until_next_beat = self._coordinator.send_heartbeat() ran_for = w.elapsed() has_to_sleep_for = wait_until_next_beat - ran_for if has_to_sleep_for < 0: LOG.warning( "Heart beating took too long to execute (it ran for" " %0.2f seconds which is %0.2f seconds longer than" " the next heartbeat idle time). This may cause" " timeouts (in locks, leadership, ...) to" " happen (which will not end well).", ran_for, ran_for - wait_until_next_beat) # Check if coordinator is still a leader if self.leader and not self._coordinator.is_still_leader(): self.on_stopped_leading() self.leader = False return self._coordinator.start_leader_watch() if self.leader: # Adjust time for leader has_to_sleep_for = has_to_sleep_for / 2 LOG.debug('resting after leader watch as leader=%(leader)s ' 'for heartbeat timeout of %(timeout)s sec', {'timeout': has_to_sleep_for, 'leader': self.leader}) self._stop.wait(has_to_sleep_for) def set_leader_callback(self, *args, **kwargs): self.leader = True def cleanup(self): if not self._stop.is_set(): self._stop.set() if self.leader: self.on_stopped_leading() self.leader = False if self._coordinator: self._coordinator.stop() self._coordinator = None def on_stopped_leading(self): self.callbacks.on_stopped_leading() ================================================ FILE: delfin/manager.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base Manager class. Managers are responsible for a certain aspect of the system. It is a logical grouping of code relating to a portion of the system. In general other components should be using the manager to make changes to the components that it is responsible for. 
For example, other components that need to deal with volumes in some way, should do so by calling methods on the VolumeManager instead of directly changing fields in the database. This allows us to keep all of the code relating to volumes in the same place. We have adopted a basic strategy of Smart managers and dumb data, which means rather than attaching methods to data objects, components should call manager methods that act on the data. Methods on managers that can be executed locally should be called directly. If a particular method must execute on a remote host, this should be done via rpc to the service that wraps the manager Managers should be responsible for most of the db access, and non-implementation specific data. Anything implementation specific that can't be generalized should be done by the Driver. In general, we prefer to have one manager with multiple drivers for different implementations, but sometimes it makes sense to have multiple managers. You can think of it this way: Abstract different overall strategies at the manager level(FlatNetwork vs VlanNetwork), and different implementations at the driver level(LinuxNetDriver vs CiscoNetDriver). Managers will often provide methods for initial setup of a host or periodic tasks to a wrapping service. This module provides Manager, a base class for managers. """ from oslo_config import cfg from oslo_log import log from oslo_service import periodic_task from delfin.db import base from delfin import version CONF = cfg.CONF LOG = log.getLogger(__name__) class PeriodicTasks(periodic_task.PeriodicTasks): def __init__(self): super(PeriodicTasks, self).__init__(CONF) class Manager(base.Base, PeriodicTasks): @property def RPC_API_VERSION(self): """Redefine this in child classes.""" raise NotImplementedError @property def target(self): """This property is used by oslo_messaging. https://wiki.openstack.org/wiki/Oslo/Messaging#API_Version_Negotiation """ if not hasattr(self, '_target'): import oslo_messaging as messaging self._target = messaging.Target(version=self.RPC_API_VERSION) return self._target def __init__(self, host=None, db_driver=None): if not host: host = CONF.host self.host = host self.additional_endpoints = [] super(Manager, self).__init__(db_driver) def periodic_tasks(self, context, raise_on_error=False): """Tasks to be run at a periodic interval.""" return self.run_periodic_tasks(context, raise_on_error=raise_on_error) def init_host(self): """Handle initialization if this is a standalone service. Child classes should override this method. """ pass def service_version(self, context): return version.version_string() def service_config(self, context): config = {} for key in CONF: config[key] = CONF.get(key, None) return config def is_service_ready(self): """Method indicating if service is ready. This method should be overridden by subclasses which will return False when the back end is not ready yet. """ return True ================================================ FILE: delfin/rpc.py ================================================ # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'init', 'cleanup', 'set_defaults', 'add_extra_exmods', 'clear_extra_exmods', 'get_allowed_exmods', 'RequestContextSerializer', 'get_client', 'get_server', 'get_notifier', ] from oslo_config import cfg import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from oslo_serialization import jsonutils import delfin.context import delfin.exception from delfin import utils CONF = cfg.CONF TRANSPORT = None NOTIFICATION_TRANSPORT = None NOTIFIER = None ALLOWED_EXMODS = [ delfin.exception.__name__, ] EXTRA_EXMODS = [] def init(conf): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER exmods = get_allowed_exmods() TRANSPORT = messaging.get_rpc_transport(conf, allowed_remote_exmods=exmods) NOTIFICATION_TRANSPORT = messaging.get_notification_transport( conf, allowed_remote_exmods=exmods) if utils.notifications_enabled(conf): json_serializer = messaging.JsonPayloadSerializer() serializer = RequestContextSerializer(json_serializer) NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, serializer=serializer) else: NOTIFIER = utils.DO_NOTHING def initialized(): return None not in [TRANSPORT, NOTIFIER] def cleanup(): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER assert TRANSPORT is not None assert NOTIFICATION_TRANSPORT is not None assert NOTIFIER is not None TRANSPORT.cleanup() NOTIFICATION_TRANSPORT.cleanup() TRANSPORT = NOTIFIER = NOTIFICATION_TRANSPORT = None def set_defaults(control_exchange): messaging.set_transport_defaults(control_exchange) def add_extra_exmods(*args): EXTRA_EXMODS.extend(args) def clear_extra_exmods(): del EXTRA_EXMODS[:] def get_allowed_exmods(): return ALLOWED_EXMODS + EXTRA_EXMODS class JsonPayloadSerializer(messaging.NoOpSerializer): @staticmethod def serialize_entity(context, entity): return jsonutils.to_primitive(entity, convert_instances=True) class RequestContextSerializer(messaging.Serializer): def __init__(self, base): self._base = base def serialize_entity(self, context, entity): if not self._base: return entity return self._base.serialize_entity(context, entity) def deserialize_entity(self, context, entity): if not self._base: return entity return self._base.deserialize_entity(context, entity) def serialize_context(self, context): return context.to_dict() def deserialize_context(self, context): return delfin.context.RequestContext.from_dict(context) def get_transport_url(url_str=None): return messaging.TransportURL.parse(CONF, url_str) def get_client(target, version_cap=None, serializer=None): assert TRANSPORT is not None serializer = RequestContextSerializer(serializer) return messaging.RPCClient(TRANSPORT, target, version_cap=version_cap, serializer=serializer) def get_server(target, endpoints, serializer=None): assert TRANSPORT is not None access_policy = dispatcher.DefaultRPCAccessPolicy serializer = RequestContextSerializer(serializer) return messaging.get_rpc_server(TRANSPORT, target, endpoints, executor='eventlet', serializer=serializer, access_policy=access_policy) @utils.if_notifications_enabled def get_notifier(service=None, host=None, publisher_id=None): assert NOTIFIER is not None if not publisher_id: publisher_id = "%s.%s" % (service, host or CONF.host) return NOTIFIER.prepare(publisher_id=publisher_id) ================================================ FILE: delfin/service.py ================================================ # Copyright 2020 The SODA Authors. 
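# The rpc module above keeps module-global transports; rpc.init(CONF) must
# run once before clients or servers are requested. A minimal client-side
# sketch (the topic name below is illustrative):
def _sketch_get_rpc_client():
    import oslo_messaging as messaging
    from oslo_config import cfg

    from delfin import rpc

    if not rpc.initialized():
        rpc.init(cfg.CONF)
    target = messaging.Target(topic='delfin-sketch-topic', version='1.0')
    return rpc.get_client(target, version_cap='1.0')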
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generic Node base class for all workers that run on hosts."""

import inspect
import os
import random

import eventlet
import oslo_messaging as messaging
from eventlet import event
from oslo_config import cfg
from oslo_log import log
from oslo_service import loopingcall
from oslo_service import service
from oslo_service import threadgroup
from oslo_service import wsgi
from oslo_utils import importutils

from delfin import context
from delfin import coordination
from delfin import rpc
from delfin.leader_election.factory import LeaderElectionFactory

LOG = log.getLogger(__name__)

service_opts = [
    cfg.BoolOpt('periodic_enable',
                default=True,
                help='Whether to enable periodic tasks.'),
    cfg.IntOpt('periodic_interval',
               default=60,
               help='Seconds between running periodic tasks.'),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=60,
               help='Range of seconds to randomly delay when starting the '
                    'periodic task scheduler to reduce stampeding. '
                    '(Disable by setting to 0)'),
    cfg.HostAddressOpt('delfin_listen',
                       default="::",
                       help='IP address for Delfin API to listen on.'),
    cfg.PortOpt('delfin_listen_port',
                default=8190,
                help='Port for Delfin API to listen on.'),
    cfg.IntOpt('delfin_workers',
               default=1,
               help='Number of workers for Delfin API service.'),
    cfg.BoolOpt('delfin_use_ssl',
                default=False,
                help='Wraps the socket in a SSL context if True is set. '
                     'A certificate file and key file must be specified.'),
    cfg.HostAddressOpt('trap_receiver_address',
                       default="0.0.0.0",
                       help='IP address at which trap receiver listens.'),
    cfg.PortOpt('trap_receiver_port',
                default=162,
                help='Port at which trap receiver listens.'),
    cfg.StrOpt('leader_election_plugin',
               default="tooz",
               help='Supported plugin for leader election. Options: '
                    'tooz(Default)'),
]

CONF = cfg.CONF
CONF.register_opts(service_opts)


class Service(service.Service):
    """Service object for binaries running on hosts.

    A service takes a manager and enables rpc by listening to queues based
    on topic. It also periodically runs tasks on the manager and reports its
    state to the database services table.
""" def __init__(self, host, binary, topic, manager, periodic_enable=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None, coordination=False, *args, **kwargs): super(Service, self).__init__() if not rpc.initialized(): rpc.init(CONF) self.host = host self.binary = binary self.topic = topic self.manager_class_name = manager manager_class = importutils.import_class(self.manager_class_name) self.manager = manager_class(host=self.host, service_name=service_name, *args, **kwargs) self.periodic_enable = periodic_enable self.periodic_interval = periodic_interval self.periodic_fuzzy_delay = periodic_fuzzy_delay self.saved_args, self.saved_kwargs = args, kwargs self.timers = [] self.coordinator = coordination def start(self): if self.coordinator: coordination.LOCK_COORDINATOR.start() LOG.info('Starting %(topic)s node.', {'topic': self.topic}) LOG.debug("Creating RPC server for service %s.", self.topic) target = messaging.Target(topic=self.topic, server=self.host) endpoints = [self.manager] endpoints.extend(self.manager.additional_endpoints) self.rpcserver = rpc.get_server(target, endpoints) self.rpcserver.start() self.manager.init_host() if self.periodic_interval: if self.periodic_fuzzy_delay: initial_delay = random.randint(0, self.periodic_fuzzy_delay) else: initial_delay = None periodic = loopingcall.FixedIntervalLoopingCall( self.periodic_tasks) periodic.start(interval=self.periodic_interval, initial_delay=initial_delay) self.timers.append(periodic) def __getattr__(self, key): manager = self.__dict__.get('manager', None) return getattr(manager, key) @classmethod def create(cls, host=None, binary=None, topic=None, manager=None, periodic_enable=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None, coordination=False, *args, **kwargs): """Instantiates class and passes back application object. :param host: defaults to CONF.host :param binary: defaults to basename of executable :param topic: defaults to bin_name - 'delfin-' part :param manager: defaults to CONF._manager :param periodic_enable: defaults to CONF.periodic_enable :param periodic_interval: defaults to CONF.periodic_interval :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay """ if not host: host = CONF.host if not binary: binary = os.path.basename(inspect.stack()[-1][1]) if not topic: topic = binary if not manager: subtopic = topic.rpartition('delfin-')[2] manager = CONF.get('%s_manager' % subtopic, None) if periodic_enable is None: periodic_enable = CONF.periodic_enable if periodic_interval is None: periodic_interval = CONF.periodic_interval if periodic_fuzzy_delay is None: periodic_fuzzy_delay = CONF.periodic_fuzzy_delay service_obj = cls(host, binary, topic, manager, periodic_enable=periodic_enable, periodic_interval=periodic_interval, periodic_fuzzy_delay=periodic_fuzzy_delay, service_name=service_name, coordination=coordination, *args, **kwargs) return service_obj def kill(self): """Destroy the service object in the datastore.""" self.stop() def stop(self, graceful=False): # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. 
as we're shutting down anyway try: if hasattr(self, 'rpcserver'): self.rpcserver.stop() except Exception as e: LOG.error('Stop the rpc server failed, the reason is %s.', e) for x in self.timers: try: x.stop() except Exception as e: LOG.error('Stop the timers failed, the reason is %s.', e) if self.coordinator: try: coordination.LOCK_COORDINATOR.stop() except Exception: LOG.exception("Unable to stop the Tooz Locking " "Coordinator.") self.timers = [] super(Service, self).stop(graceful) def wait(self): for x in self.timers: try: x.wait() except Exception: pass def periodic_tasks(self, raise_on_error=False): """Tasks to be run at a periodic interval.""" ctxt = context.get_admin_context() self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) class AlertService(Service): """Service object for triggering trap receiver functionalities. """ @classmethod def create(cls, host=None, binary=None, topic=None, manager=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None, coordination=False, *args, **kwargs): kwargs['trap_receiver_address'] = CONF.trap_receiver_address kwargs['trap_receiver_port'] = CONF.trap_receiver_port service_obj = super(AlertService, cls).create( host=host, binary=binary, topic=topic, manager=manager, periodic_interval=periodic_interval, periodic_fuzzy_delay=periodic_fuzzy_delay, service_name=service_name, coordination=coordination, *args, **kwargs) return service_obj def start(self): super(AlertService, self).start() self.manager.start() def stop(self): try: self.manager.stop() except Exception: pass super(AlertService, self).stop() class TaskService(Service): """Service object for triggering task manager functionalities. """ @classmethod def create(cls, host=None, binary=None, topic=None, manager=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None, coordination=False, *args, **kwargs): service_obj = super(TaskService, cls).create( host=host, binary=binary, topic=topic, manager=manager, periodic_interval=periodic_interval, periodic_fuzzy_delay=periodic_fuzzy_delay, service_name=service_name, coordination=coordination, *args, **kwargs) return service_obj def start(self): super(TaskService, self).start() class MetricsService(Service): """Service object for triggering metrics manager functionalities. 
""" @classmethod def create(cls, host=None, binary=None, topic=None, manager=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None, coordination=False, *args, **kwargs): service_obj = super(MetricsService, cls).create( host=host, binary=binary, topic=topic, manager=manager, periodic_interval=periodic_interval, periodic_fuzzy_delay=periodic_fuzzy_delay, service_name=service_name, coordination=coordination, *args, **kwargs) return service_obj def start(self): super(MetricsService, self).start() self.manager.init_scheduler(self.topic, self.host) class LeaderElectionService(service.Service): """Leader election service for distributed system The service takes callback functions and leader election unique key to synchronize leaders in distributed environment """ def __init__(self, leader_elector, *args, **kwargs): super(LeaderElectionService, self).__init__() self.leader_elector = leader_elector self._tg = threadgroup.ThreadGroup() self._stop = event.Event() def start(self): """Start leader election service """ def run_leader_service(stop): while not stop.ready(): try: # Start/restart participating in leader election LOG.info("Starting leader election service") self.leader_elector.run() except Exception as e: LOG.error("Exception in leader election run [%s]" % e) try: # Cleanup and again start participating for leadership LOG.info("Cleaning leader election residue") self.leader_elector.cleanup() except Exception as e: LOG.error("Exception in leader election cleanup [%s]" % e) # Wait for grace period LOG.info( "Waiting till grace period[%s] to restart leader " "election" % CONF.coordination.lease_timeout) eventlet.greenthread.sleep(CONF.coordination.lease_timeout) self._tg.add_thread(run_leader_service, self._stop) def __getattr__(self, key): leader = self.__dict__.get('leader', None) return getattr(leader, key) @classmethod def create(cls, *args, **kwargs): """Instantiates class and passes back application object. """ leader_elector = LeaderElectionFactory.construct_elector( CONF.leader_election_plugin) service_obj = cls(leader_elector, *args, **kwargs) return service_obj def kill(self): self.stop() def stop(self, graceful=False): # Stop leader election service if not self._stop.ready(): self._stop.send() try: # cleanup after stop if self.leader_elector: self.leader_elector.cleanup() except Exception as e: LOG.warning("Exception in leader election cleanup [%s]" % e) # Reap thread group: self.tg.stop(graceful) super(LeaderElectionService, self).stop(graceful) def wait(self): self._tg.wait() class WSGIService(service.ServiceBase): """Provides ability to launch API from a 'paste' configuration.""" def __init__(self, name, loader=None, coordination=False): """Initialize, but do not start the WSGI server. :param name: The name of the WSGI server given to the loader. :param loader: Loads the WSGI application using the given name. :returns: None """ self.name = name self.manager = self._get_manager() self.loader = loader or wsgi.Loader(CONF) if not rpc.initialized(): rpc.init(CONF) self.app = self.loader.load_app(name) self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") self.port = getattr(CONF, '%s_listen_port' % name, 0) self.workers = getattr(CONF, '%s_workers' % name, None) self.use_ssl = getattr(CONF, '%s_use_ssl' % name, False) if self.workers is not None and self.workers < 1: LOG.warning( "Value of config option %(name)s_workers must be integer " "greater than 1. 
Input value ignored.", {'name': name}) # Reset workers to default self.workers = None self.server = wsgi.Server( CONF, name, self.app, host=self.host, port=self.port, use_ssl=self.use_ssl ) self.coordinator = coordination def _get_manager(self): """Initialize a Manager object appropriate for this service. Use the service name to look up a Manager subclass from the configuration and initialize an instance. If no class name is configured, just return None. :returns: a Manager instance, or None. """ fl = '%s_manager' % self.name if fl not in CONF: return None manager_class_name = CONF.get(fl, None) if not manager_class_name: return None manager_class = importutils.import_class(manager_class_name) return manager_class() def start(self): """Start serving this service using loaded configuration. Also, retrieve updated port number in case '0' was passed in, which indicates a random port should be used. :returns: None """ if self.coordinator: coordination.LOCK_COORDINATOR.start() if self.manager: self.manager.init_host() self.server.start() self.port = self.server.port def stop(self): """Stop serving this API. :returns: None """ try: self.server.stop() except Exception: pass self._stop_coordinator() def wait(self): """Wait for the service to stop serving this API. :returns: None """ self.server.wait() self._stop_coordinator() def reset(self): """Reset server greenpool size to default. :returns: None """ self.server.reset() def _stop_coordinator(self): if self.coordinator: try: coordination.LOCK_COORDINATOR.stop() except Exception: LOG.exception("Unable to stop the Tooz Locking " "Coordinator.") def process_launcher(): return service.ServiceLauncher(CONF, restart_method='reload') # NOTE(vish): the global launcher is to maintain the existing # functionality of calling service.serve + # service.wait _launcher = None def serve(server, workers=None): global _launcher if not _launcher: _launcher = service.Launcher(CONF, restart_method='mutate') _launcher.launch_service(server, workers=workers) def wait(): CONF.log_opt_values(LOG, log.DEBUG) try: _launcher.wait() except KeyboardInterrupt: _launcher.stop() rpc.cleanup() ================================================ FILE: delfin/ssl_utils.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
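# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the module API): how a
# driver might combine the helpers below. The URL and ca_path value are
# placeholders; the real path comes from the storage_driver.ca_path option.
#
#     import requests
#     from delfin import ssl_utils
#
#     ca_path = ssl_utils.get_storage_ca_path()   # e.g. '/etc/delfin/certs/'
#     ssl_utils.verify_ca_path(ca_path)
#     ssl_utils.reload_certificate(ca_path)       # create c_rehash-style links
#
#     session = requests.Session()
#     # Validate the certificate chain but skip hostname checking:
#     session.mount("https://", ssl_utils.get_host_name_ignore_adapter())
#     session.get("https://192.0.2.10/api/types", verify=ca_path)
# ---------------------------------------------------------------------------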
import os import requests from oslo_config import cfg from oslo_log import log from urllib3 import PoolManager from OpenSSL.crypto import load_certificate, FILETYPE_PEM from delfin import exception LOG = log.getLogger(__name__) CONF = cfg.CONF FILE = 'configs.json' def get_storage_ca_path(): return CONF.storage_driver.ca_path def verify_ca_path(ca_path): """ Checking the ca_path exists """ if not os.path.exists(ca_path): LOG.error("Directory {0} could not be found.".format(ca_path)) raise exception.InvalidCAPath(ca_path) def _load_cert(fpath, file, ca_path): with open(fpath, "rb") as f: cert_content = f.read() cert = load_certificate(FILETYPE_PEM, cert_content) hash_val = cert.subject_name_hash() hash_hex = hex(hash_val).strip('0x') + ".0" linkfile = ca_path + hash_hex if os.path.exists(linkfile): LOG.debug("Link for {0} already exist.". format(file)) else: LOG.info("Create link file {0} for {1}.". format(linkfile, fpath)) os.symlink(fpath, linkfile) def reload_certificate(ca_path): """ Checking the driver security config validation. As required by requests, ca_path must be a directory prepared using the c_rehash tool included with OpenSSL. Once new certificate added, this function can be called for update. If there is a CA certificate chain, all CA certificates along this chain should be included in a single file. """ suffixes = ['.pem', '.cer', '.crt', '.crl'] files = os.listdir(ca_path) for file in files: if not os.path.isdir(file): suf = os.path.splitext(file)[1] if suf in suffixes: fpath = ca_path + file _load_cert(fpath, file, ca_path) def get_host_name_ignore_adapter(): return HostNameIgnoreAdapter() class HostNameIgnoreAdapter(requests.adapters.HTTPAdapter): def cert_verify(self, conn, url, verify, cert): conn.assert_hostname = False return super(HostNameIgnoreAdapter, self).cert_verify( conn, url, verify, cert) def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs): self._pool_connections = connections self._pool_maxsize = maxsize self._pool_block = block self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, block=block, strict=True, **pool_kwargs) ================================================ FILE: delfin/task_manager/__init__.py ================================================ ================================================ FILE: delfin/task_manager/manager.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
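# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module) of the dynamic dispatch used
# by TaskManager.sync_storage_resource() below: the caller passes a fully
# qualified class path, which is imported and executed. The class path is a
# real one from this code base; 'context' and 'storage_id' are placeholders.
#
#     from oslo_utils import importutils
#
#     resource_task = 'delfin.task_manager.tasks.resources.StoragePoolTask'
#     cls = importutils.import_class(resource_task)
#     cls(context, storage_id).sync()
# ---------------------------------------------------------------------------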
""" **periodical task manager** """ from oslo_log import log from oslo_utils import importutils from delfin import manager from delfin.drivers import manager as driver_manager from delfin.drivers import api as driver_api from delfin.task_manager.tasks import alerts, telemetry LOG = log.getLogger(__name__) class TaskManager(manager.Manager): """manage periodical tasks""" RPC_API_VERSION = '1.0' def __init__(self, service_name=None, *args, **kwargs): self.alert_task = alerts.AlertSyncTask() self.telemetry_task = telemetry.TelemetryTask() super(TaskManager, self).__init__(*args, **kwargs) def sync_storage_resource(self, context, storage_id, resource_task): LOG.debug("Received the sync_storage task: {0} request for storage" " id:{1}".format(resource_task, storage_id)) cls = importutils.import_class(resource_task) device_obj = cls(context, storage_id) device_obj.sync() def remove_storage_resource(self, context, storage_id, resource_task): cls = importutils.import_class(resource_task) device_obj = cls(context, storage_id) device_obj.remove() def remove_storage_in_cache(self, context, storage_id): LOG.info('Remove storage device in memory for storage id:{0}' .format(storage_id)) driver_api.API().remove_storage(context, storage_id) drivers = driver_manager.DriverManager() drivers.remove_driver(storage_id) def sync_storage_alerts(self, context, storage_id, query_para): LOG.info('Alert sync called for storage id:{0}' .format(storage_id)) self.alert_task.sync_alerts(context, storage_id, query_para) def clear_storage_alerts(self, context, storage_id, sequence_number_list): LOG.info('Clear alerts called for storage id: {0}' .format(storage_id)) return self.alert_task.clear_alerts(context, storage_id, sequence_number_list) ================================================ FILE: delfin/task_manager/metrics_manager.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" periodical task manager for metric collection tasks** """ from apscheduler.schedulers.background import BackgroundScheduler import datetime import six from oslo_log import log from oslo_config import cfg from oslo_utils import uuidutils from oslo_service import service as oslo_ser from delfin import context as ctxt from delfin.coordination import ConsistentHashing, GroupMembership from delfin import db from delfin import exception from delfin import manager from delfin import service from delfin.task_manager.scheduler import schedule_manager from delfin.task_manager import subprocess_rpcapi as rpcapi from delfin.task_manager.scheduler.schedulers.telemetry.job_handler \ import FailedJobHandler from delfin.task_manager.scheduler.schedulers.telemetry.job_handler \ import JobHandler LOG = log.getLogger(__name__) CONF = cfg.CONF class MetricsTaskManager(manager.Manager): """manage periodical tasks""" RPC_API_VERSION = '1.0' def __init__(self, service_name=None, *args, **kwargs): super(MetricsTaskManager, self).__init__(*args, **kwargs) scheduler = schedule_manager.SchedulerManager() scheduler.start() partitioner = ConsistentHashing() partitioner.start() partitioner.join_group() self.watch_job_id = None self.cleanup_job_id = None self.group = None self.watcher = None self.scheduler = None self.rpcapi = rpcapi.SubprocessAPI() self.executor_map = {} self.enable_sub_process = CONF.telemetry.enable_dynamic_subprocess if self.enable_sub_process: self.scheduler = BackgroundScheduler() self.scheduler.start() self.schedule_boot_jobs(self.host) def assign_job(self, context, task_id, executor): if not self.enable_sub_process: instance = JobHandler.get_instance(context, task_id) instance.schedule_job(task_id) else: if not self.watch_job_id: self.init_watchers(executor) local_executor = self.get_local_executor( context, task_id, None, executor) self.rpcapi.assign_job_local(context, task_id, local_executor) def remove_job(self, context, task_id, executor): if not self.enable_sub_process: instance = JobHandler.get_instance(context, task_id) instance.remove_job(task_id) else: job = db.task_get(context, task_id) storage_id = job['storage_id'] for name in self.executor_map.keys(): if storage_id in self.executor_map[name]["storages"]: local_executor = "{0}:{1}".format(executor, name) self.rpcapi.remove_job_local( context, task_id, local_executor) tasks, failed_tasks = self.get_all_tasks(storage_id) if len(failed_tasks) == 0 and len(tasks) == 0: self.stop_executor(name, local_executor, storage_id) def assign_failed_job(self, context, failed_task_id, executor): if not self.enable_sub_process: instance = FailedJobHandler.get_instance(context, failed_task_id) instance.schedule_failed_job(failed_task_id) else: if not self.watch_job_id: self.init_watchers(executor) local_executor = self.get_local_executor( context, None, failed_task_id, executor) self.rpcapi.assign_failed_job_local( context, failed_task_id, local_executor) def remove_failed_job(self, context, failed_task_id, executor): if not self.enable_sub_process: instance = FailedJobHandler.get_instance(context, failed_task_id) instance.remove_failed_job(failed_task_id) else: job = db.failed_task_get(context, failed_task_id) storage_id = job['storage_id'] for name in self.executor_map.keys(): if storage_id in self.executor_map[name]["storages"]: local_executor = "{0}:{1}".format(executor, name) self.rpcapi.remove_failed_job_local( context, failed_task_id, local_executor) tasks, failed_tasks = self.get_all_tasks(storage_id) if len(failed_tasks) == 0 and 
len(tasks) == 0: self.stop_executor(name, local_executor, storage_id) def schedule_boot_jobs(self, executor): """Schedule periodic collection if any task is currently assigned to this executor """ try: filters = {'executor': executor, 'deleted': False} context = ctxt.get_admin_context() tasks = db.task_get_all(context, filters=filters) failed_tasks = db.failed_task_get_all(context, filters=filters) LOG.info("Scheduling boot time jobs for this executor: total " "jobs to be handled :%s" % len(tasks)) for task in tasks: self.assign_job(context, task['id'], executor) LOG.debug('Periodic collection job assigned for id: ' '%s ' % task['id']) for failed_task in failed_tasks: self.assign_failed_job(context, failed_task['id'], executor) LOG.debug('Failed job assigned for id: ' '%s ' % failed_task['id']) except Exception as e: LOG.error("Failed to schedule boot jobs for this executor " "reason: %s.", six.text_type(e)) else: LOG.debug("Boot job scheduling completed.") def init_watchers(self, group): watcher = GroupMembership(agent_id=group) watcher.start() watcher.create_group(group) LOG.info('Created child process membership group {0}.' 'Initial members of group: {1}' .format(group, watcher.get_members(group))) watcher.register_watcher_func(group, self.on_process_join, self.on_process_leave) self.group = group self.watcher = watcher self.watch_job_id = uuidutils.generate_uuid() self.scheduler.add_job(watcher.watch_group_change, 'interval', seconds=CONF.telemetry. group_change_detect_interval, next_run_time=datetime.datetime.now(), id=self.watch_job_id) LOG.info('Created watch for group membership change for group {0}.' .format(group)) self.cleanup_job_id = uuidutils.generate_uuid() self.scheduler.add_job(self.process_cleanup, 'interval', seconds=CONF.telemetry.process_cleanup_interval, next_run_time=datetime.datetime.now(), id=self.cleanup_job_id) LOG.info('Created process cleanup background job for group {0}.' 
.format(group)) def on_process_join(self, event): LOG.info('Member %s joined the group %s' % (event.member_id, event.group_id)) host = event.group_id.decode('utf-8') if self.watcher: LOG.info('Processes in current node {0}' .format(self.watcher.get_members(host))) def on_process_leave(self, event): LOG.info('Member %s left the group %s' % (event.member_id, event.group_id)) executor_topic = event.member_id.decode('utf-8') name = executor_topic.split(':')[1] if name in self.executor_map.keys(): host = event.group_id.decode('utf-8') LOG.info("Re-create process {0} in {1} that is handling tasks" .format(executor_topic, host)) launcher = self.create_process(executor_topic, host) self.executor_map[name]["launcher"] = launcher context = ctxt.get_admin_context() for storage_id in self.executor_map[name]["storages"]: tasks, failed_tasks = self.get_all_tasks(storage_id) for task in tasks: LOG.info("Re-scheduling task {0} of storage {1}" .format(task['id'], storage_id)) self.rpcapi.assign_job_local( context, task['id'], executor_topic) for f_task in failed_tasks: LOG.info("Re-scheduling failed task {0}" " of storage {1}" .format(f_task['id'], storage_id)) self.rpcapi.assign_failed_job_local( context, f_task['id'], executor_topic) def process_cleanup(self): LOG.info('Periodic process cleanup called') executor_names = self.executor_map.keys() # Collect all names to delete names_to_delete = [] for name in executor_names: if len(self.executor_map[name]["storages"]) == 0: delay = self.executor_map[name]["cleanup_delay"] if delay < 0: LOG.info("Cleanup delay for local executor {0} expired" .format(name)) names_to_delete.append(name) else: LOG.info("Delay cleanup for local executor {0} for {1}" .format(name, delay)) delay = delay - CONF.telemetry.process_cleanup_interval self.executor_map[name]["cleanup_delay"] = delay # Delete names for name in names_to_delete: self.executor_map[name]["launcher"].stop() self.executor_map.pop(name) def create_process(self, topic=None, host=None): metrics_task_server = service. \ MetricsService.create(binary='delfin-task', topic=topic, host=host, manager='delfin.' 'task_manager.' 'subprocess_manager.'
'SubprocessManager', coordination=False) launcher = oslo_ser.ProcessLauncher(CONF) launcher.launch_service(metrics_task_server, workers=1) return launcher def get_local_executor(self, context, task_id, failed_task_id, executor): executor_names = self.executor_map.keys() storage_id = None if task_id: job = db.task_get(context, task_id) storage_id = job['storage_id'] elif failed_task_id: job = db.failed_task_get(context, failed_task_id) storage_id = job['storage_id'] else: raise exception.InvalidInput("Missing task id") # Storage already exists for name in executor_names: executor_topic = "{0}:{1}".format(executor, name) if storage_id in self.executor_map[name]["storages"]: return executor_topic # Return existing executor_topic for name in executor_names: no_of_storages = len(self.executor_map[name]["storages"]) if no_of_storages and (no_of_storages < CONF.telemetry.max_storages_in_child): executor_topic = "{0}:{1}".format(executor, name) LOG.info("Selecting existing local executor {0} for {1}" .format(executor_topic, storage_id)) self.executor_map[name]["storages"].append(storage_id) return executor_topic # Return executor_topic after creating one for index in range(CONF.telemetry.max_childs_in_node): name = "executor_{0}".format(index + 1) if name not in executor_names: executor_topic = "{0}:{1}".format(executor, name) LOG.info("Create a new local executor {0} for {1}" .format(executor_topic, storage_id)) launcher = self.create_process( topic=executor_topic, host=executor) self.executor_map[name] = { "storages": [storage_id], "launcher": launcher, "cleanup_delay": 0 } return executor_topic msg = "Reached maximum number of ({0}) local executors". \ format(CONF.telemetry.max_childs_in_node) LOG.error(msg) raise RuntimeError(msg) def get_all_tasks(self, storage_id): filters = {'storage_id': storage_id, 'deleted': False} context = ctxt.get_admin_context() tasks = db.task_get_all(context, filters=filters) failed_tasks = db.failed_task_get_all(context, filters=filters) return tasks, failed_tasks def stop_executor(self, name, local_executor, storage_id): LOG.info("Stop and remove local executor {0}" .format(local_executor)) if storage_id in self.executor_map[name]["storages"]: self.executor_map[name]["storages"].remove(storage_id) self.executor_map[name]["cleanup_delay"] = \ CONF.telemetry.task_cleanup_delay def stop(self): """Cleanup periodic jobs""" if self.watch_job_id: self.scheduler.remove_job(self.watch_job_id) if self.cleanup_job_id: self.scheduler.remove_job(self.cleanup_job_id) if self.group and self.watcher: self.watcher.delete_group(self.group) if self.watcher: self.watcher.stop() if self.scheduler: self.scheduler.shutdown() self.watch_job_id = None self.cleanup_job_id = None self.group = None self.watcher = None ================================================ FILE: delfin/task_manager/metrics_rpcapi.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Client side of the metrics task manager RPC API. 
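
Example (illustrative sketch; 'ctx', the task id and the executor name are
placeholders):

    api = TaskAPI()
    api.assign_job(ctx, task_id, executor='node-1')
    api.create_perf_job(ctx, task_id)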
""" import oslo_messaging as messaging from oslo_config import cfg from delfin import rpc CONF = cfg.CONF class TaskAPI(object): """Client side of the metrics task rpc API. API version history: 1.0 - Initial version. """ RPC_API_VERSION = '1.0' def __init__(self): super(TaskAPI, self).__init__() self.target = messaging.Target(topic=CONF.host, version=self.RPC_API_VERSION) self.client = rpc.get_client(self.target, version_cap=self.RPC_API_VERSION) def get_client(self, topic): target = messaging.Target(topic=topic, version=self.RPC_API_VERSION) return rpc.get_client(target, version_cap=self.RPC_API_VERSION) def assign_job(self, context, task_id, executor): rpc_client = self.get_client(str(executor)) call_context = rpc_client.prepare(topic=str(executor), version='1.0', fanout=True) return call_context.cast(context, 'assign_job', task_id=task_id, executor=executor) def remove_job(self, context, task_id, executor): rpc_client = self.get_client(str(executor)) call_context = rpc_client.prepare(topic=str(executor), version='1.0', fanout=True) return call_context.cast(context, 'remove_job', task_id=task_id, executor=executor) def assign_failed_job(self, context, failed_task_id, executor): rpc_client = self.get_client(str(executor)) call_context = rpc_client.prepare(topic=str(executor), version='1.0', fanout=True) return call_context.cast(context, 'assign_failed_job', failed_task_id=failed_task_id, executor=executor) def remove_failed_job(self, context, failed_task_id, executor): rpc_client = self.get_client(str(executor)) call_context = rpc_client.prepare(topic=str(executor), version='1.0', fanout=True) return call_context.cast(context, 'remove_failed_job', failed_task_id=failed_task_id, executor=executor) def create_perf_job(self, context, task_id): rpc_client = self.get_client('JobGenerator') call_context = rpc_client.prepare(topic='JobGenerator', version='1.0') return call_context.cast(context, 'add_new_job', task_id=task_id) ================================================ FILE: delfin/task_manager/perf_job_controller.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Client side of the metrics task manager RPC API. 
""" from oslo_config import cfg from oslo_log import log from delfin import db from delfin import exception from delfin.common import constants from delfin.task_manager import metrics_rpcapi LOG = log.getLogger(__name__) CONF = cfg.CONF def create_perf_job(context, storage_id, capabilities): # Add it to db # Check resource_metric attribute availability and # check if resource_metric is empty if 'resource_metrics' not in capabilities \ or not bool(capabilities.get('resource_metrics')): raise exception.EmptyResourceMetrics() task = dict() task.update(storage_id=storage_id) task.update(args=capabilities.get('resource_metrics')) task.update(interval=capabilities.get('collect_interval') if capabilities.get('collect_interval') else CONF.telemetry.performance_collection_interval) task.update(method=constants.TelemetryCollection.PERFORMANCE_TASK_METHOD) db.task_create(context=context, values=task) # Add it to RabbitMQ filters = {'storage_id': storage_id} task_id = db.task_get_all(context, filters=filters)[0].get('id') metrics_rpcapi.TaskAPI().create_perf_job(context, task_id) def delete_perf_job(context, storage_id): # Delete it from scheduler filters = {'storage_id': storage_id} tasks = db.task_get_all(context, filters=filters) failed_tasks = db.failed_task_get_all(context, filters=filters) for task in tasks: metrics_rpcapi.TaskAPI().remove_job(context, task.get('id'), task.get('executor')) for failed_task in failed_tasks: metrics_rpcapi.TaskAPI().remove_failed_job(context, failed_task.get('id'), failed_task.get('executor')) # Soft delete tasks db.task_delete_by_storage(context, storage_id) db.failed_task_delete_by_storage(context, storage_id) ================================================ FILE: delfin/task_manager/rpcapi.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Client side of the task manager RPC API. """ import oslo_messaging as messaging from oslo_config import cfg from delfin import rpc CONF = cfg.CONF class TaskAPI(object): """Client side of the task rpc API. API version history: 1.0 - Initial version. 
""" RPC_API_VERSION = '1.0' def __init__(self): super(TaskAPI, self).__init__() target = messaging.Target(topic=CONF.delfin_task_topic, version=self.RPC_API_VERSION) self.client = rpc.get_client(target, version_cap=self.RPC_API_VERSION) def sync_storage_resource(self, context, storage_id, resource_task): call_context = self.client.prepare(version='1.0') return call_context.cast(context, 'sync_storage_resource', storage_id=storage_id, resource_task=resource_task) def collect_telemetry(self, context, storage_id, telemetry_task, args, start_time, end_time): call_context = self.client.prepare(version='1.0') return call_context.call(context, 'collect_telemetry', storage_id=storage_id, telemetry_task=telemetry_task, args=args, start_time=start_time, end_time=end_time) def remove_storage_resource(self, context, storage_id, resource_task): call_context = self.client.prepare(version='1.0') return call_context.cast(context, 'remove_storage_resource', storage_id=storage_id, resource_task=resource_task) def remove_storage_in_cache(self, context, storage_id): call_context = self.client.prepare(version='1.0', fanout=True) return call_context.cast(context, 'remove_storage_in_cache', storage_id=storage_id) def remove_telemetry_instances(self, context, storage_id, telemetry_task): call_context = self.client.prepare(version='1.0', fanout=True) return call_context.cast(context, 'remove_telemetry_instances', storage_id=storage_id, telemetry_task=telemetry_task) def sync_storage_alerts(self, context, storage_id, query_para): call_context = self.client.prepare(version='1.0') return call_context.cast(context, 'sync_storage_alerts', storage_id=storage_id, query_para=query_para) def clear_storage_alerts(self, context, storage_id, sequence_number_list): call_context = self.client.prepare(version='1.0') return call_context.call(context, 'clear_storage_alerts', storage_id=storage_id, sequence_number_list=sequence_number_list) ================================================ FILE: delfin/task_manager/scheduler/__init__.py ================================================ ================================================ FILE: delfin/task_manager/scheduler/schedule_manager.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from datetime import datetime import six from apscheduler.schedulers.background import BackgroundScheduler from oslo_log import log from oslo_utils import uuidutils from delfin import context from delfin import db from delfin import service from delfin import utils from delfin.coordination import ConsistentHashing from delfin.leader_election.distributor.task_distributor \ import TaskDistributor from delfin.task_manager import metrics_rpcapi as task_rpcapi LOG = log.getLogger(__name__) @six.add_metaclass(utils.Singleton) class SchedulerManager(object): GROUP_CHANGE_DETECT_INTERVAL_SEC = 30 def __init__(self, scheduler=None): if not scheduler: scheduler = BackgroundScheduler() self.scheduler = scheduler self.scheduler_started = False self.ctx = context.get_admin_context() self.task_rpcapi = task_rpcapi.TaskAPI() self.watch_job_id = None def start(self): """ Initialise the schedulers for periodic job creation """ if not self.scheduler_started: self.scheduler.start() self.scheduler_started = True def on_node_join(self, event): # A new node joined the group, all the job would be re-distributed. # If the job is already on the node, it would be ignore and would # not be scheduled again LOG.info('Member %s joined the group %s' % (event.member_id, event.group_id)) # Get all the jobs filters = {'deleted': False} tasks = db.task_get_all(self.ctx, filters=filters) distributor = TaskDistributor(self.ctx) partitioner = ConsistentHashing() partitioner.start() for task in tasks: # Get the specific executor origin_executor = task['executor'] # If the target executor is different from current executor, # remove the job from old executor and add it to new executor new_executor = partitioner.get_task_executor(task['id']) if new_executor != origin_executor: LOG.info('Re-distribute job %s from %s to %s' % (task['id'], origin_executor, new_executor)) self.task_rpcapi.remove_job(self.ctx, task['id'], task['executor']) distributor.distribute_new_job(task['id']) failed_tasks = db.failed_task_get_all(self.ctx, filters=filters) for failed_task in failed_tasks: # Get the parent task executor task = db.task_get(self.ctx, failed_task['task_id']) origin_executor = failed_task['executor'] new_executor = task['executor'] # If the target executor is different from current executor, # remove the job from old executor and add it to new executor if new_executor != origin_executor: LOG.info('Re-distribute failed_job %s from %s to %s' % (failed_task['id'], origin_executor, new_executor)) self.task_rpcapi.remove_failed_job( self.ctx, failed_task['id'], failed_task['executor']) distributor.distribute_failed_job(failed_task['id'], task['executor']) partitioner.stop() def on_node_leave(self, event): LOG.info('Member %s left the group %s' % (event.member_id, event.group_id)) filters = {'executor': event.member_id.decode('utf-8'), 'deleted': False} re_distribute_tasks = db.task_get_all(self.ctx, filters=filters) distributor = TaskDistributor(self.ctx) for task in re_distribute_tasks: distributor.distribute_new_job(task['id']) re_distribute_failed_tasks = db.failed_task_get_all(self.ctx, filters=filters) for failed_task in re_distribute_failed_tasks: task = db.task_get(self.ctx, failed_task['task_id']) executor = task['executor'] distributor.distribute_failed_job(failed_task['id'], executor) def schedule_boot_jobs(self): # Recover the job in db self.recover_job() self.recover_failed_job() # Start the consumer of job creation message job_generator = service. 
\ TaskService.create(binary='delfin-task', topic='JobGenerator', manager='delfin.' 'leader_election.' 'distributor.' 'perf_job_manager.' 'PerfJobManager', coordination=True) service.serve(job_generator) partitioner = ConsistentHashing() partitioner.start() partitioner.register_watcher_func(self.on_node_join, self.on_node_leave) self.watch_job_id = uuidutils.generate_uuid() self.scheduler.add_job(partitioner.watch_group_change, 'interval', seconds=self.GROUP_CHANGE_DETECT_INTERVAL_SEC, next_run_time=datetime.now(), id=self.watch_job_id) def stop(self): """Cleanup periodic jobs""" if self.watch_job_id: self.scheduler.remove_job(self.watch_job_id) def get_scheduler(self): return self.scheduler def recover_job(self): filters = {'deleted': False} all_tasks = db.task_get_all(self.ctx, filters=filters) distributor = TaskDistributor(self.ctx) for task in all_tasks: distributor.distribute_new_job(task['id']) def recover_failed_job(self): filters = {'deleted': False} all_failed_tasks = db.failed_task_get_all(self.ctx, filters=filters) distributor = TaskDistributor(self.ctx) for failed_task in all_failed_tasks: task = db.task_get(self.ctx, failed_task['task_id']) executor = task['executor'] distributor.distribute_failed_job(failed_task['id'], executor) ================================================ FILE: delfin/task_manager/scheduler/schedulers/__init__.py ================================================ ================================================ FILE: delfin/task_manager/scheduler/schedulers/telemetry/__init__.py ================================================ ================================================ FILE: delfin/task_manager/scheduler/schedulers/telemetry/failed_performance_collection_handler.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
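# ---------------------------------------------------------------------------
# Retry contract implemented by the handler below, as a commented sketch:
# every periodic callback increments retry_count; the failed task is torn
# down either after one successful collection or once retry_count reaches
# TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT.
#
#     for attempt in range(TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT):
#         if telemetry.collect(ctx, storage_id, args, start_time, end_time):
#             result = TelemetryJobStatus.FAILED_JOB_STATUS_SUCCESS
#             break                      # success: stop retrying
#     else:
#         pass                           # retries exhausted: give up
# ---------------------------------------------------------------------------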
import six from oslo_config import cfg from oslo_log import log from delfin import db from delfin import exception from delfin.common.constants import TelemetryJobStatus, TelemetryCollection from delfin.db.sqlalchemy.models import FailedTask from delfin.db.sqlalchemy.models import Task from delfin.i18n import _ from delfin.task_manager.scheduler import schedule_manager from delfin.task_manager.tasks.telemetry import PerformanceCollectionTask from delfin.task_manager import metrics_rpcapi as metrics_task_rpcapi LOG = log.getLogger(__name__) CONF = cfg.CONF class FailedPerformanceCollectionHandler(object): def __init__(self, ctx, failed_task_id, storage_id, args, job_id, retry_count, start_time, end_time, executor): self.ctx = ctx self.failed_task_id = failed_task_id self.retry_count = retry_count self.storage_id = storage_id self.job_id = job_id self.args = args self.start_time = start_time self.end_time = end_time self.metrics_task_rpcapi = metrics_task_rpcapi.TaskAPI() self.scheduler_instance = \ schedule_manager.SchedulerManager().get_scheduler() self.result = TelemetryJobStatus.FAILED_JOB_STATUS_INIT self.executor = executor @staticmethod def get_instance(ctx, failed_task_id): failed_task = db.failed_task_get(ctx, failed_task_id) task = db.task_get(ctx, failed_task[FailedTask.task_id.name]) return FailedPerformanceCollectionHandler( ctx, failed_task[FailedTask.id.name], task[Task.storage_id.name], task[Task.args.name], failed_task[FailedTask.job_id.name], failed_task[FailedTask.retry_count.name], failed_task[FailedTask.start_time.name], failed_task[FailedTask.end_time.name], failed_task[FailedTask.executor.name], ) def __call__(self): # Upon periodic job callback, if storage is already deleted or soft # deleted,do not proceed with failed performance collection flow try: failed_task = db.failed_task_get(self.ctx, self.failed_task_id) if failed_task["deleted"]: LOG.debug('Storage %s getting deleted, ignoring ' 'performance collection cycle for failed task id %s.' % (self.storage_id, self.failed_task_id)) return except exception.FailedTaskNotFound: LOG.debug('Storage %s already deleted, ignoring ' 'performance collection cycle for failed task id %s.' % (self.storage_id, self.failed_task_id)) return self.retry_count = self.retry_count + 1 try: telemetry = PerformanceCollectionTask() status = telemetry.collect(self.ctx, self.storage_id, self.args, self.start_time, self.end_time) if not status: raise exception.TelemetryTaskExecError() except Exception as e: LOG.error(e) msg = _("Failed to collect performance metrics for storage " "id:{0}, reason:{1}".format(self.storage_id, six.text_type(e))) LOG.error(msg) else: LOG.info("Successfully completed Performance metrics collection " "for storage id :{0} ".format(self.storage_id)) self.result = TelemetryJobStatus.FAILED_JOB_STATUS_SUCCESS self._stop_task() return if self.retry_count >= TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT: msg = _( "Failed to collect performance metrics of task instance " "id:{0} for start time:{1} and end time:{2} with " "maximum retry. 
Giving up on " "retry".format(self.failed_task_id, self.start_time, self.end_time)) LOG.error(msg) self._stop_task() return self.result = TelemetryJobStatus.FAILED_JOB_STATUS_RETRYING db.failed_task_update(self.ctx, self.failed_task_id, {FailedTask.retry_count.name: self.retry_count, FailedTask.result.name: self.result}) def _stop_task(self): db.failed_task_update(self.ctx, self.failed_task_id, {FailedTask.retry_count.name: self.retry_count, FailedTask.result.name: self.result}) self.metrics_task_rpcapi.remove_failed_job(self.ctx, self.failed_task_id, self.executor) ================================================ FILE: delfin/task_manager/scheduler/schedulers/telemetry/job_handler.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import datetime import six from oslo_config import cfg from oslo_log import log from oslo_utils import uuidutils, importutils from delfin import db from delfin.common.constants import TelemetryCollection, TelemetryJobStatus from delfin.exception import TaskNotFound from delfin.i18n import _ from delfin.task_manager import rpcapi as task_rpcapi from delfin.task_manager.scheduler import schedule_manager from delfin.task_manager.tasks.telemetry import PerformanceCollectionTask CONF = cfg.CONF LOG = log.getLogger(__name__) class JobHandler(object): def __init__(self, ctx, task_id, storage_id, args, interval): # create an object of periodic task scheduler self.ctx = ctx self.task_id = task_id self.storage_id = storage_id self.args = args self.interval = interval self.task_rpcapi = task_rpcapi.TaskAPI() self.scheduler = schedule_manager.SchedulerManager().get_scheduler() self.stopped = False self.job_ids = set() @staticmethod def get_instance(ctx, task_id): task = db.task_get(ctx, task_id) return JobHandler(ctx, task_id, task['storage_id'], task['args'], task['interval']) def perform_history_collection(self, start_time, end_time, last_run_time): # Trigger one historic collection to make sure we do not # miss any Data points due to reschedule LOG.debug('Triggering one historic collection for task %s', self.task_id) try: telemetry = PerformanceCollectionTask() ret = telemetry.collect(self.ctx, self.storage_id, self.args, start_time, end_time) LOG.debug('Historic collection performed for task %s with ' 'result %s' % (self.task_id, ret)) db.task_update(self.ctx, self.task_id, {'last_run_time': last_run_time}) except Exception as e: msg = _("Failed to collect performance metrics during history " "collection for storage id:{0}, reason:{1}" .format(self.storage_id, six.text_type(e))) LOG.error(msg) def schedule_job(self, task_id): if self.stopped: # If Job is stopped return immediately return LOG.info("JobHandler received A job %s to schedule" % task_id) job = db.task_get(self.ctx, task_id) # Check delete status of the task deleted = job['deleted'] if deleted: return collection_class = importutils.import_class( job['method']) instance = collection_class.get_instance(self.ctx, self.task_id) 
current_time = int(datetime.now().timestamp()) last_run_time = current_time next_collection_time = last_run_time + job['interval'] job_id = uuidutils.generate_uuid() next_collection_time = datetime \ .fromtimestamp(next_collection_time) \ .strftime('%Y-%m-%d %H:%M:%S') existing_job_id = job['job_id'] scheduler_job = self.scheduler.get_job(existing_job_id) if not (existing_job_id and scheduler_job): LOG.info('JobHandler scheduling a new job') self.scheduler.add_job( instance, 'interval', seconds=job['interval'], next_run_time=next_collection_time, id=job_id, misfire_grace_time=int(job['interval'] / 2)) update_task_dict = {'job_id': job_id} db.task_update(self.ctx, self.task_id, update_task_dict) self.job_ids.add(job_id) LOG.info('Periodic collection tasks scheduled for job id: ' '%s ' % self.task_id) # Check if historic collection is needed for this task. # If the last run time is already set, adjust start_time based on # last run time or history_on_reschedule, whichever is smaller # If job id is created but last run time is not yet set, then # adjust start_time based on interval or history_on_reschedule # whichever is smaller end_time = current_time * 1000 # Maximum supported history duration on restart history_on_reschedule = CONF.telemetry. \ performance_history_on_reschedule if job['last_run_time']: start_time = job['last_run_time'] * 1000 \ if current_time - job['last_run_time'] < \ history_on_reschedule \ else (end_time - history_on_reschedule * 1000) self.perform_history_collection(start_time, end_time, last_run_time) elif existing_job_id: interval_in_sec = job['interval'] start_time = (end_time - interval_in_sec * 1000) \ if interval_in_sec < history_on_reschedule \ else (end_time - history_on_reschedule * 1000) self.perform_history_collection(start_time, end_time, last_run_time) else: LOG.info('Job already exists with this scheduler') def stop(self): self.stopped = True for job_id in self.job_ids.copy(): self.remove_scheduled_job(job_id) LOG.info("Stopping telemetry jobs") def remove_scheduled_job(self, job_id): if job_id in self.job_ids: self.job_ids.remove(job_id) if job_id and self.scheduler.get_job(job_id): self.scheduler.remove_job(job_id) def remove_job(self, task_id): try: LOG.info("Received job %s to remove", task_id) job = db.task_get(self.ctx, task_id) job_id = job['job_id'] self.remove_scheduled_job(job_id) except Exception as e: LOG.error("Failed to remove periodic scheduling job, reason: %s.", six.text_type(e)) class FailedJobHandler(object): def __init__(self, ctx): # create an object of periodic failed task scheduler self.scheduler = schedule_manager.SchedulerManager().get_scheduler() self.ctx = ctx self.stopped = False self.job_ids = set() @staticmethod def get_instance(ctx, failed_task_id): return FailedJobHandler(ctx) def schedule_failed_job(self, failed_task_id): if self.stopped: return try: job = db.failed_task_get(self.ctx, failed_task_id) retry_count = job['retry_count'] result = job['result'] job_id = job['job_id'] if retry_count >= \ TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT or \ result == TelemetryJobStatus.FAILED_JOB_STATUS_SUCCESS: LOG.info("Exiting failed task processing for task [%d] " "with result [%s] and retry count [%d] " % (job['id'], result, retry_count)) self._teardown_task(self.ctx, job['id'], job_id) return # If job already scheduled, skip if job_id and self.scheduler.get_job(job_id): return try: db.task_get(self.ctx, job['task_id']) except TaskNotFound as e: LOG.info("Removing failed telemetry job as parent job " "does not exist: %s",
six.text_type(e)) # tear down if original task is not available self._teardown_task(self.ctx, job['id'], job_id) return if not (job_id and self.scheduler.get_job(job_id)): job_id = uuidutils.generate_uuid() db.failed_task_update(self.ctx, job['id'], {'job_id': job_id}) collection_class = importutils.import_class( job['method']) instance = \ collection_class.get_instance(self.ctx, job['id']) self.scheduler.add_job( instance, 'interval', seconds=job['interval'], next_run_time=datetime.now(), id=job_id, misfire_grace_time=int(job['interval'] / 2)) self.job_ids.add(job_id) except Exception as e: LOG.error("Failed to schedule retry tasks for performance " "collection, reason: %s", six.text_type(e)) else: LOG.info("Schedule collection completed") def _teardown_task(self, ctx, failed_task_id, job_id): db.failed_task_delete(ctx, failed_task_id) self.remove_scheduled_job(job_id) def remove_scheduled_job(self, job_id): if job_id in self.job_ids: self.job_ids.remove(job_id) if job_id and self.scheduler.get_job(job_id): self.scheduler.remove_job(job_id) def stop(self): self.stopped = True for job_id in self.job_ids.copy(): self.remove_scheduled_job(job_id) def remove_failed_job(self, failed_task_id): try: LOG.info("Received failed job %s to remove", failed_task_id) job = db.failed_task_get(self.ctx, failed_task_id) job_id = job['job_id'] self.remove_scheduled_job(job_id) db.failed_task_delete(self.ctx, job['id']) LOG.info("Removed failed_task entry %s ", job['id']) except Exception as e: LOG.error("Failed to remove periodic scheduling job , reason: %s.", six.text_type(e)) @classmethod def job_interval(cls): return TelemetryCollection.FAILED_JOB_SCHEDULE_INTERVAL ================================================ FILE: delfin/task_manager/scheduler/schedulers/telemetry/performance_collection_handler.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import datetime import six from oslo_config import cfg from oslo_log import log from delfin import db from delfin import exception from delfin.common.constants import TelemetryCollection from delfin.db.sqlalchemy.models import FailedTask from delfin.drivers import api as driverapi from delfin.task_manager import metrics_rpcapi as metrics_task_rpcapi from delfin.task_manager.scheduler import schedule_manager from delfin.task_manager.scheduler.schedulers.telemetry. 
\ failed_performance_collection_handler import \ FailedPerformanceCollectionHandler from delfin.task_manager.tasks.telemetry import PerformanceCollectionTask CONF = cfg.CONF LOG = log.getLogger(__name__) CONF = cfg.CONF class PerformanceCollectionHandler(object): def __init__(self, ctx, task_id, storage_id, args, interval, executor): self.ctx = ctx self.task_id = task_id self.storage_id = storage_id self.args = args self.interval = interval self.metric_task_rpcapi = metrics_task_rpcapi.TaskAPI() self.driver_api = driverapi.API() self.executor = executor self.scheduler = schedule_manager.SchedulerManager().get_scheduler() @staticmethod def get_instance(ctx, task_id): task = db.task_get(ctx, task_id) return PerformanceCollectionHandler(ctx, task_id, task['storage_id'], task['args'], task['interval'], task['executor']) def __call__(self): # Upon periodic job callback, if storage is already deleted or soft # deleted,do not proceed with performance collection flow try: task = db.task_get(self.ctx, self.task_id) if task["deleted"]: LOG.debug('Storage %s getting deleted, ignoring performance ' 'collection cycle for task id %s.' % (self.storage_id, self.task_id)) return except exception.TaskNotFound: LOG.debug('Storage %s already deleted, ignoring performance ' 'collection cycle for task id %s.' % (self.storage_id, self.task_id)) return # Handles performance collection from driver and dispatch start_time = None end_time = None try: LOG.debug('Collecting performance metrics for task id: %s' % self.task_id) current_time = int(datetime.now().timestamp()) # Times are epoch time in milliseconds overlap = CONF.telemetry. \ performance_timestamp_overlap end_time = current_time * 1000 start_time = end_time - (self.interval * 1000) - (overlap * 1000) telemetry = PerformanceCollectionTask() status = telemetry.collect(self.ctx, self.storage_id, self.args, start_time, end_time) db.task_update(self.ctx, self.task_id, {'last_run_time': current_time}) if not status: raise exception.TelemetryTaskExecError() except Exception as e: LOG.error("Failed to collect performance metrics for " "task id :{0}, reason:{1}".format(self.task_id, six.text_type(e))) self._handle_task_failure(start_time, end_time) else: LOG.debug("Performance collection done for storage id :{0}" ",task id :{1} and interval(in sec):{2}" .format(self.storage_id, self.task_id, self.interval)) def _handle_task_failure(self, start_time, end_time): failed_task_interval = TelemetryCollection.FAILED_JOB_SCHEDULE_INTERVAL try: # Fetch driver's capability for performance metric retention window # If driver supports it and if it is within collection range, # consider it for failed task scheduling capabilities = self.driver_api.get_capabilities(self.ctx, self.storage_id) performance_metric_retention_window \ = capabilities.get('performance_metric_retention_window') if capabilities.get('failed_job_collect_interval'): failed_task_interval = \ capabilities.get('failed_job_collect_interval') if performance_metric_retention_window: collection_window = performance_metric_retention_window \ if performance_metric_retention_window <= CONF.telemetry \ .max_failed_task_retry_window \ else CONF.telemetry.max_failed_task_retry_window failed_task_interval = collection_window / TelemetryCollection\ .MAX_FAILED_JOB_RETRY_COUNT except Exception as e: LOG.error("Failed to get driver capabilities during failed task " "scheduling for storage id :{0}, reason:{1}" .format(self.storage_id, six.text_type(e))) failed_task = {FailedTask.storage_id.name: self.storage_id, 
FailedTask.task_id.name: self.task_id, FailedTask.interval.name: failed_task_interval, FailedTask.end_time.name: end_time, FailedTask.start_time.name: start_time, FailedTask.method.name: FailedPerformanceCollectionHandler.__module__ + '.' + FailedPerformanceCollectionHandler.__name__, FailedTask.retry_count.name: 0, FailedTask.executor.name: self.executor} failed_task = db.failed_task_create(self.ctx, failed_task) self.metric_task_rpcapi.assign_failed_job(self.ctx, failed_task['id'], failed_task['executor']) ================================================ FILE: delfin/task_manager/subprocess_manager.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Subprocess metrics manager for metric collection tasks** """ from oslo_log import log from oslo_config import cfg from delfin.coordination import GroupMembership from delfin import manager from delfin.task_manager.scheduler import schedule_manager from delfin.task_manager.scheduler.schedulers.telemetry.job_handler \ import FailedJobHandler from delfin.task_manager.scheduler.schedulers.telemetry.job_handler \ import JobHandler CONF = cfg.CONF LOG = log.getLogger(__name__) class SubprocessManager(manager.Manager): """manage periodical collection tasks in subprocesses""" RPC_API_VERSION = '1.0' def __init__(self, service_name=None, *args, **kwargs): super(SubprocessManager, self).__init__(*args, **kwargs) def init_scheduler(self, topic, host): scheduler = schedule_manager.SchedulerManager() scheduler.start() watcher = GroupMembership(topic) watcher.start() watcher.join_group(host) def assign_job_local(self, context, task_id): instance = JobHandler.get_instance(context, task_id) instance.schedule_job(task_id) def remove_job_local(self, context, task_id): instance = JobHandler.get_instance(context, task_id) instance.remove_job(task_id) def assign_failed_job_local(self, context, failed_task_id): instance = FailedJobHandler.get_instance(context, failed_task_id) instance.schedule_failed_job(failed_task_id) def remove_failed_job_local(self, context, failed_task_id): instance = FailedJobHandler.get_instance(context, failed_task_id) instance.remove_failed_job(failed_task_id) ================================================ FILE: delfin/task_manager/subprocess_rpcapi.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Client side of the subprocess metrics collection manager RPC API. 
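
Example (illustrative sketch; 'ctx' and the task id are placeholders, and
the executor topic follows the '<host>:<name>' convention used by the
metrics task manager):

    api = SubprocessAPI()
    api.assign_job_local(ctx, task_id, executor='node-1:executor_1')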
""" import oslo_messaging as messaging from oslo_config import cfg from delfin import rpc CONF = cfg.CONF class SubprocessAPI(object): """Client side of the subprocess metrics manager collection rpc API. API version history: 1.0 - Initial version. """ RPC_API_VERSION = '1.0' def __init__(self): super(SubprocessAPI, self).__init__() self.target = messaging.Target(topic=CONF.host, version=self.RPC_API_VERSION) self.client = rpc.get_client(self.target, version_cap=self.RPC_API_VERSION) def get_client(self, topic): target = messaging.Target(topic=topic, version=self.RPC_API_VERSION) return rpc.get_client(target, version_cap=self.RPC_API_VERSION) def assign_job_local(self, context, task_id, executor): rpc_client = self.get_client(str(executor)) call_context = rpc_client.prepare(topic=str(executor), version='1.0', fanout=False) return call_context.cast(context, 'assign_job_local', task_id=task_id) def remove_job_local(self, context, task_id, executor): rpc_client = self.get_client(str(executor)) call_context = rpc_client.prepare(topic=str(executor), version='1.0', fanout=False) return call_context.cast(context, 'remove_job_local', task_id=task_id) def assign_failed_job_local(self, context, failed_task_id, executor): rpc_client = self.get_client(str(executor)) call_context = rpc_client.prepare(topic=str(executor), version='1.0', fanout=False) return call_context.cast(context, 'assign_failed_job_local', failed_task_id=failed_task_id) def remove_failed_job_local(self, context, failed_task_id, executor): rpc_client = self.get_client(str(executor)) call_context = rpc_client.prepare(topic=str(executor), version='1.0', fanout=False) return call_context.cast(context, 'remove_failed_job_local', failed_task_id=failed_task_id) ================================================ FILE: delfin/task_manager/tasks/__init__.py ================================================ ================================================ FILE: delfin/task_manager/tasks/alerts.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import six from oslo_log import log from delfin import db from delfin import exception from delfin.common import alert_util from delfin.drivers import api as driver_manager from delfin.exporter import base_exporter from delfin.i18n import _ LOG = log.getLogger(__name__) class AlertSyncTask(object): def __init__(self): self.driver_manager = driver_manager.API() self.alert_export_manager = base_exporter.AlertExporterManager() def sync_alerts(self, ctx, storage_id, query_para): """ Syncs all alerts from storage side to exporter """ LOG.info('Syncing alerts for storage id:{0}, query_para: {1}'.format( storage_id, query_para)) try: storage = db.storage_get(ctx, storage_id) current_alert_list = self.driver_manager.list_alerts(ctx, storage_id, query_para) if not current_alert_list: # No alerts to sync LOG.info('No alerts to sync from storage device for ' 'storage id:{0}'.format(storage_id)) return for alert in current_alert_list: alert_util.fill_storage_attributes(alert, storage) self.alert_export_manager.dispatch(ctx, current_alert_list) LOG.info('Syncing storage alerts successful for storage id:{0}' .format(storage_id)) except Exception as e: msg = _('Failed to sync alerts from storage device: {0}' .format(six.text_type(e))) LOG.error(msg) def clear_alerts(self, ctx, storage_id, sequence_number_list): """ Clear alert from storage """ LOG.info('Clear alert for storage id:{0}'.format(storage_id)) sequence_number_list = sequence_number_list or [] failure_list = [] for sequence_number in sequence_number_list: try: self.driver_manager.clear_alert(ctx, storage_id, sequence_number) except (exception.AccessInfoNotFound, exception.StorageNotFound) as e: LOG.warning("Ignore the situation: %s", e.msg) except Exception as e: LOG.error("Failed to clear alert with sequence number: %s " "for storage: %s, reason: %s.", sequence_number, storage_id, six.text_type(e)) failure_list.append(sequence_number) return failure_list ================================================ FILE: delfin/task_manager/tasks/resources.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
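# ---------------------------------------------------------------------------
# Worked example (illustrative data, not part of the module) for
# StorageResourceTask._classify_resources() below: storage resources are
# matched against db rows by their native id; matches become updates, new
# native ids become adds, and leftover db ids are deleted.
#
#     storage_resources = [{'native_volume_id': 'v1'},
#                          {'native_volume_id': 'v3'}]
#     db_resources = [{'id': 1, 'native_volume_id': 'v1'},
#                     {'id': 2, 'native_volume_id': 'v2'}]
#     # With key='native_volume_id':
#     #   add_list       == [{'native_volume_id': 'v3'}]
#     #   update_list    == [{'native_volume_id': 'v1', 'id': 1}]
#     #   delete_id_list == [2]
# ---------------------------------------------------------------------------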
import inspect

import decorator
from oslo_log import log

from delfin import coordination
from delfin import db
from delfin import exception
from delfin.common import constants
from delfin.drivers import api as driverapi
from delfin.i18n import _

LOG = log.getLogger(__name__)


def set_synced_after():
    @decorator.decorator
    def _set_synced_after(func, *args, **kwargs):
        call_args = inspect.getcallargs(func, *args, **kwargs)
        self = call_args['self']
        sync_result = constants.ResourceSync.SUCCEED
        ret = None
        try:
            ret = func(*args, **kwargs)
        except Exception:
            sync_result = constants.ResourceSync.FAILED

        lock = coordination.Lock(self.storage_id)
        with lock:
            try:
                storage = db.storage_get(self.context, self.storage_id)
            except exception.StorageNotFound:
                LOG.warning('Storage %s not found when setting synced status'
                            % self.storage_id)
            else:
                # One sync task done, so decrement the sync status by one.
                # When the sync status reaches 0, all sync tasks
                # have completed.
                if storage['sync_status'] != constants.SyncStatus.SYNCED:
                    storage['sync_status'] -= sync_result
                    db.storage_update(self.context, self.storage_id,
                                      {'sync_status':
                                           storage['sync_status']})
        return ret

    return _set_synced_after


def check_deleted():
    @decorator.decorator
    def _check_deleted(func, *args, **kwargs):
        call_args = inspect.getcallargs(func, *args, **kwargs)
        self = call_args['self']
        ret = func(*args, **kwargs)
        # When context.read_deleted is 'yes', db.storage_get would
        # only get the storage whose 'deleted' tag is not the default value
        self.context.read_deleted = 'yes'
        try:
            db.storage_get(self.context, self.storage_id)
        except exception.StorageNotFound:
            LOG.debug('Storage %s not found when checking deleted'
                      % self.storage_id)
        else:
            self.remove()
        self.context.read_deleted = 'no'
        return ret

    return _check_deleted


class StorageResourceTask(object):
    NATIVE_RESOURCE_ID = None

    def __init__(self, context, storage_id):
        self.storage_id = storage_id
        self.context = context
        self.driver_api = driverapi.API()

    def _classify_resources(self, storage_resources, db_resources, key):
        """Classify resources into add, update and delete lists.

        :param storage_resources: resources listed from the storage backend.
        :param db_resources: resources currently present in the database.
        :return: three lists:
            add_list: items present in storage but not in the database.
            update_list: items present both in storage and in the database.
            delete_id_list: ids of items present in the database but no
            longer present in storage.
        """
        original_ids_in_db = [resource[key] for resource in db_resources]
        delete_id_list = [resource['id'] for resource in db_resources]
        add_list = []
        update_list = []

        for resource in storage_resources:
            if resource[key] in original_ids_in_db:
                resource['id'] = db_resources[original_ids_in_db.index(
                    resource[key])]['id']
                delete_id_list.remove(resource['id'])
                update_list.append(resource)
            else:
                add_list.append(resource)

        return add_list, update_list, delete_id_list

    @check_deleted()
    @set_synced_after()
    def sync(self):
        """Synchronize device resource data to the database."""
        LOG.info('{} sync for storage(id={}) start'.format(
            self.__class__.__name__, self.storage_id))
        try:
            # list the storage resources from driver and database
            storage_resources = self.driver_list_resources()
            db_resources = self.db_resource_get_all(
                {'storage_id': self.storage_id})

            add_list, update_list, delete_id_list = self._classify_resources(
                storage_resources, db_resources, self.NATIVE_RESOURCE_ID)

            if delete_id_list:
                self.db_resources_delete(delete_id_list)

            if update_list:
                self.db_resources_update(update_list)

            if add_list:
                self.db_resources_create(add_list)
        except NotImplementedError:
            # Ignore this exception because the driver may not support it.
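            # (added note) Not every driver implements every resource
            # listing; treating NotImplementedError as "nothing to sync"
            # keeps each per-resource task optional per backend, e.g. a
            # block-only array may not implement list_filesystems().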
            pass
        except Exception as e:
            msg = _('{} sync for storage(id={}) failed: {}'.format(
                self.__class__.__name__, self.storage_id, e))
            LOG.error(msg)
            raise
        else:
            LOG.info('{} sync for storage(id={}) successful'.format(
                self.__class__.__name__, self.storage_id))

    def remove(self):
        LOG.info('{} remove for storage(id={})'.format(
            self.__class__.__name__, self.storage_id))
        self.db_resource_delete_by_storage()

    def driver_list_resources(self):
        raise NotImplementedError(
            'Resource task API driver_list_resources() is not implemented')

    def db_resource_get_all(self, filters):
        raise NotImplementedError(
            'Resource task API db_resource_get_all() is not implemented')

    def db_resources_delete(self, delete_id_list):
        raise NotImplementedError(
            'Resource task API db_resources_delete() is not implemented')

    def db_resources_update(self, update_list):
        raise NotImplementedError(
            'Resource task API db_resources_update() is not implemented')

    def db_resources_create(self, add_list):
        raise NotImplementedError(
            'Resource task API db_resources_create() is not implemented')

    def db_resource_delete_by_storage(self):
        raise NotImplementedError(
            'Resource task API db_resource_delete_by_storage() '
            'is not implemented')


class StorageDeviceTask(StorageResourceTask):
    def __init__(self, context, storage_id):
        super(StorageDeviceTask, self).__init__(context, storage_id)

    @check_deleted()
    @set_synced_after()
    def sync(self):
        """Sync the storage device attributes to the database."""
        LOG.info('Syncing storage device for storage id:{0}'.format(
            self.storage_id))
        try:
            storage = self.driver_api.get_storage(self.context,
                                                  self.storage_id)

            db.storage_update(self.context, self.storage_id, storage)
        except Exception as e:
            msg = _('Failed to update storage entry in DB: {0}'
                    .format(e))
            LOG.error(msg)
            raise
        else:
            LOG.info("Syncing storage successful!!!")

    def remove(self):
        LOG.info('Remove storage device for storage id:{0}'
                 .format(self.storage_id))
        try:
            db.storage_delete(self.context, self.storage_id)
            db.access_info_delete(self.context, self.storage_id)
            db.alert_source_delete(self.context, self.storage_id)
        except Exception as e:
            LOG.error('Failed to delete storage entry in DB: {0}'.format(e))


class StoragePoolTask(StorageResourceTask):
    NATIVE_RESOURCE_ID = 'native_storage_pool_id'

    def driver_list_resources(self):
        return self.driver_api.list_storage_pools(
            self.context, self.storage_id)

    def db_resource_get_all(self, filters):
        return db.storage_pool_get_all(self.context, filters=filters)

    def db_resources_delete(self, delete_id_list):
        return db.storage_pools_delete(self.context, delete_id_list)

    def db_resources_update(self, update_list):
        return db.storage_pools_update(self.context, update_list)

    def db_resources_create(self, add_list):
        return db.storage_pools_create(self.context, add_list)

    def db_resource_delete_by_storage(self):
        return db.storage_pool_delete_by_storage(self.context,
                                                 self.storage_id)


class StorageVolumeTask(StorageResourceTask):
    NATIVE_RESOURCE_ID = 'native_volume_id'

    def driver_list_resources(self):
        return self.driver_api.list_volumes(self.context, self.storage_id)

    def db_resource_get_all(self, filters):
        return db.volume_get_all(self.context, filters=filters)

    def db_resources_delete(self, delete_id_list):
        return db.volumes_delete(self.context, delete_id_list)

    def db_resources_update(self, update_list):
        return db.volumes_update(self.context, update_list)

    def db_resources_create(self, add_list):
        return db.volumes_create(self.context, add_list)

    def db_resource_delete_by_storage(self):
        return db.volume_delete_by_storage(self.context, self.storage_id)


class
StorageControllerTask(StorageResourceTask): NATIVE_RESOURCE_ID = 'native_controller_id' def driver_list_resources(self): return self.driver_api.list_controllers(self.context, self.storage_id) def db_resource_get_all(self, filters): return db.controller_get_all(self.context, filters=filters) def db_resources_delete(self, delete_id_list): return db.controllers_delete(self.context, delete_id_list) def db_resources_update(self, update_list): return db.controllers_update(self.context, update_list) def db_resources_create(self, add_list): return db.controllers_create(self.context, add_list) def db_resource_delete_by_storage(self): return db.controller_delete_by_storage(self.context, self.storage_id) class StoragePortTask(StorageResourceTask): NATIVE_RESOURCE_ID = 'native_port_id' def driver_list_resources(self): return self.driver_api.list_ports(self.context, self.storage_id) def db_resource_get_all(self, filters): return db.port_get_all(self.context, filters=filters) def db_resources_delete(self, delete_id_list): return db.ports_delete(self.context, delete_id_list) def db_resources_update(self, update_list): return db.ports_update(self.context, update_list) def db_resources_create(self, add_list): return db.ports_create(self.context, add_list) def db_resource_delete_by_storage(self): return db.port_delete_by_storage(self.context, self.storage_id) class StorageDiskTask(StorageResourceTask): NATIVE_RESOURCE_ID = 'native_disk_id' def driver_list_resources(self): return self.driver_api.list_disks(self.context, self.storage_id) def db_resource_get_all(self, filters): return db.disk_get_all(self.context, filters=filters) def db_resources_delete(self, delete_id_list): return db.disks_delete(self.context, delete_id_list) def db_resources_update(self, update_list): return db.disks_update(self.context, update_list) def db_resources_create(self, add_list): return db.disks_create(self.context, add_list) def db_resource_delete_by_storage(self): return db.disk_delete_by_storage(self.context, self.storage_id) class StorageQuotaTask(StorageResourceTask): NATIVE_RESOURCE_ID = 'native_quota_id' def driver_list_resources(self): return self.driver_api.list_quotas(self.context, self.storage_id) def db_resource_get_all(self, filters): return db.quota_get_all(self.context, filters=filters) def db_resources_delete(self, delete_id_list): return db.quotas_delete(self.context, delete_id_list) def db_resources_update(self, update_list): return db.quotas_update(self.context, update_list) def db_resources_create(self, add_list): return db.quotas_create(self.context, add_list) def db_resource_delete_by_storage(self): return db.quota_delete_by_storage(self.context, self.storage_id) class StorageFilesystemTask(StorageResourceTask): NATIVE_RESOURCE_ID = 'native_filesystem_id' def driver_list_resources(self): return self.driver_api.list_filesystems(self.context, self.storage_id) def db_resource_get_all(self, filters): return db.filesystem_get_all(self.context, filters=filters) def db_resources_delete(self, delete_id_list): return db.filesystems_delete(self.context, delete_id_list) def db_resources_update(self, update_list): return db.filesystems_update(self.context, update_list) def db_resources_create(self, add_list): return db.filesystems_create(self.context, add_list) def db_resource_delete_by_storage(self): return db.filesystem_delete_by_storage(self.context, self.storage_id) class StorageQtreeTask(StorageResourceTask): NATIVE_RESOURCE_ID = 'native_qtree_id' def driver_list_resources(self): return 
self.driver_api.list_qtrees(self.context, self.storage_id) def db_resource_get_all(self, filters): return db.qtree_get_all(self.context, filters=filters) def db_resources_delete(self, delete_id_list): return db.qtrees_delete(self.context, delete_id_list) def db_resources_update(self, update_list): return db.qtrees_update(self.context, update_list) def db_resources_create(self, add_list): return db.qtrees_create(self.context, add_list) def db_resource_delete_by_storage(self): return db.qtree_delete_by_storage(self.context, self.storage_id) class StorageShareTask(StorageResourceTask): NATIVE_RESOURCE_ID = 'native_share_id' def driver_list_resources(self): return self.driver_api.list_shares(self.context, self.storage_id) def db_resource_get_all(self, filters): return db.share_get_all(self.context, filters=filters) def db_resources_delete(self, delete_id_list): return db.shares_delete(self.context, delete_id_list) def db_resources_update(self, update_list): return db.shares_update(self.context, update_list) def db_resources_create(self, add_list): return db.shares_create(self.context, add_list) def db_resource_delete_by_storage(self): return db.share_delete_by_storage(self.context, self.storage_id) class StorageHostInitiatorTask(StorageResourceTask): def __init__(self, context, storage_id): super(StorageHostInitiatorTask, self).__init__(context, storage_id) @check_deleted() @set_synced_after() def sync(self): """ :return: """ LOG.info('Syncing storage host initiator for storage id:{0}' .format(self.storage_id)) try: # Collect the storage host initiator list from driver and database storage_host_initiators = self.driver_api \ .list_storage_host_initiators(self.context, self.storage_id) if storage_host_initiators: db.storage_host_initiators_delete_by_storage( self.context, self.storage_id) db.storage_host_initiators_create( self.context, storage_host_initiators) LOG.info('Building storage host initiator successful for ' 'storage id:{0}'.format(self.storage_id)) except AttributeError as e: LOG.error(e) except NotImplementedError: # Ignore this exception because driver may not support it. 
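            # (added note) Initiators are refreshed above with a
            # delete-by-storage followed by a bulk create, rather than the
            # diff-based add/update/delete classification used by
            # StorageResourceTask.sync().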
pass except Exception as e: msg = _('Failed to sync storage host initiators entry ' 'in DB: {0}'.format(e)) LOG.error(msg) else: LOG.info("Syncing storage host initiators successful!!!") def remove(self): LOG.info('Remove storage host initiators for storage id:{0}' .format(self.storage_id)) db.storage_host_initiators_delete_by_storage(self.context, self.storage_id) class StorageHostTask(StorageResourceTask): NATIVE_RESOURCE_ID = 'native_storage_host_id' def driver_list_resources(self): return self.driver_api.list_storage_hosts(self.context, self.storage_id) def db_resource_get_all(self, filters): return db.storage_hosts_get_all(self.context, filters=filters) def db_resources_delete(self, delete_id_list): return db.storage_hosts_delete(self.context, delete_id_list) def db_resources_update(self, update_list): return db.storage_hosts_update(self.context, update_list) def db_resources_create(self, add_list): return db.storage_hosts_create(self.context, add_list) def db_resource_delete_by_storage(self): return db.storage_hosts_delete_by_storage(self.context, self.storage_id) class StorageHostGroupTask(StorageResourceTask): def __init__(self, context, storage_id): super(StorageHostGroupTask, self).__init__(context, storage_id) @check_deleted() @set_synced_after() def sync(self): """ :return: """ LOG.info('Syncing storage host group for storage id:{0}' .format(self.storage_id)) try: # Collect the storage host group list from driver and database. # Build relation between host grp and host to be handled here. storage_hg_obj = self.driver_api \ .list_storage_host_groups(self.context, self.storage_id) storage_host_groups = storage_hg_obj['storage_host_groups'] storage_host_rels = storage_hg_obj['storage_host_grp_host_rels'] if storage_host_groups: db.storage_host_grp_host_rels_delete_by_storage( self.context, self.storage_id) db.storage_host_grp_host_rels_create( self.context, storage_host_rels) LOG.info('Building host group relations successful for ' 'storage id:{0}'.format(self.storage_id)) db_storage_host_groups = db.storage_host_groups_get_all( self.context, filters={"storage_id": self.storage_id}) add_list, update_list, delete_id_list = self._classify_resources( storage_host_groups, db_storage_host_groups, 'native_storage_host_group_id') LOG.debug('###StorageHostGroupTask for {0}:add={1},delete={2},' 'update={3}'.format(self.storage_id, len(add_list), len(delete_id_list), len(update_list))) if delete_id_list: db.storage_host_groups_delete(self.context, delete_id_list) if update_list: db.storage_host_groups_update(self.context, update_list) if add_list: db.storage_host_groups_create(self.context, add_list) except AttributeError as e: LOG.error(e) except NotImplementedError: # Ignore this exception because driver may not support it. 
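            # (added note) The group-to-host relations are rebuilt
            # wholesale above, while the host groups themselves go through
            # the usual add/update/delete classification.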
pass except Exception as e: msg = _('Failed to sync storage host groups entry in DB: {0}' .format(e)) LOG.error(msg) else: LOG.info("Syncing storage host groups successful!!!") def remove(self): LOG.info('Remove storage host groups for storage id:{0}' .format(self.storage_id)) db.storage_host_grp_host_rels_delete_by_storage(self.context, self.storage_id) db.storage_host_groups_delete_by_storage(self.context, self.storage_id) class PortGroupTask(StorageResourceTask): def __init__(self, context, storage_id): super(PortGroupTask, self).__init__(context, storage_id) @check_deleted() @set_synced_after() def sync(self): """ :return: """ LOG.info('Syncing port group for storage id:{0}' .format(self.storage_id)) try: # Collect the port groups from driver and database # Build relation between port grp and port to be handled here. port_groups_obj = self.driver_api \ .list_port_groups(self.context, self.storage_id) port_groups = port_groups_obj['port_groups'] port_group_relation_list = port_groups_obj['port_grp_port_rels'] if port_groups: db.port_grp_port_rels_delete_by_storage( self.context, self.storage_id) db.port_grp_port_rels_create( self.context, port_group_relation_list) LOG.info('Building port group relations successful for ' 'storage id:{0}'.format(self.storage_id)) db_port_groups = db.port_groups_get_all( self.context, filters={"storage_id": self.storage_id}) add_list, update_list, delete_id_list = self._classify_resources( port_groups, db_port_groups, 'native_port_group_id') LOG.debug('###PortGroupTask for {0}:add={1},delete={2},' 'update={3}'.format(self.storage_id, len(add_list), len(delete_id_list), len(update_list))) if delete_id_list: db.port_groups_delete(self.context, delete_id_list) if update_list: db.port_groups_update(self.context, update_list) if add_list: db.port_groups_create(self.context, add_list) except AttributeError as e: LOG.error(e) except NotImplementedError: # Ignore this exception because driver may not support it. pass except Exception as e: msg = _('Failed to sync port groups entry in DB: {0}'.format(e)) LOG.error(msg) else: LOG.info("Syncing port groups successful!!!") def remove(self): LOG.info('Remove port groups for storage id:{0}' .format(self.storage_id)) db.port_grp_port_rels_delete_by_storage(self.context, self.storage_id) db.port_groups_delete_by_storage(self.context, self.storage_id) class VolumeGroupTask(StorageResourceTask): def __init__(self, context, storage_id): super(VolumeGroupTask, self).__init__(context, storage_id) @check_deleted() @set_synced_after() def sync(self): """ :return: """ LOG.info('Syncing volume group for storage id:{0}' .format(self.storage_id)) try: # Collect the volume groups from driver and database # Build relation between volume grp and volume to be handled here. 
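            # The driver is assumed to return a dict shaped like
            # {'volume_groups': [...], 'vol_grp_vol_rels': [...]},
            # mirroring the host-group and port-group tasks above.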
volume_groups_obj = self.driver_api \ .list_volume_groups(self.context, self.storage_id) volume_groups = volume_groups_obj['volume_groups'] volume_groups_rels = volume_groups_obj['vol_grp_vol_rels'] if volume_groups: db.vol_grp_vol_rels_delete_by_storage( self.context, self.storage_id) db.vol_grp_vol_rels_create(self.context, volume_groups_rels) LOG.info('Building volume group relations successful for ' 'storage id:{0}'.format(self.storage_id)) db_volume_groups = db.volume_groups_get_all( self.context, filters={"storage_id": self.storage_id}) add_list, update_list, delete_id_list = self._classify_resources( volume_groups, db_volume_groups, 'native_volume_group_id') LOG.debug('###VolumeGroupTask for {0}:add={1},delete={2},' 'update={3}'.format(self.storage_id, len(add_list), len(delete_id_list), len(update_list))) if delete_id_list: db.volume_groups_delete(self.context, delete_id_list) if update_list: db.volume_groups_update(self.context, update_list) if add_list: db.volume_groups_create(self.context, add_list) except AttributeError as e: LOG.error(e) except NotImplementedError: # Ignore this exception because driver may not support it. pass except Exception as e: msg = _('Failed to sync volume groups entry in DB: {0}'.format(e)) LOG.error(msg) else: LOG.info("Syncing volume groups successful!!!") def remove(self): LOG.info('Remove volume groups for storage id:{0}' .format(self.storage_id)) db.vol_grp_vol_rels_delete_by_storage(self.context, self.storage_id) db.volume_groups_delete_by_storage(self.context, self.storage_id) class MaskingViewTask(StorageResourceTask): NATIVE_RESOURCE_ID = 'native_masking_view_id' def driver_list_resources(self): return self.driver_api.list_masking_views(self.context, self.storage_id) def db_resource_get_all(self, filters): return db.masking_views_get_all(self.context, filters=filters) def db_resources_delete(self, delete_id_list): return db.masking_views_delete(self.context, delete_id_list) def db_resources_update(self, update_list): return db.masking_views_update(self.context, update_list) def db_resources_create(self, add_list): return db.masking_views_create(self.context, add_list) def db_resource_delete_by_storage(self): return db.masking_views_delete_by_storage(self.context, self.storage_id) ================================================ FILE: delfin/task_manager/tasks/telemetry.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
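# NOTE (editorial, illustrative only): a hedged sketch of how a telemetry
# job handler might drive the collection task below; ``ctx``, ``storage_id``
# and ``args`` are assumed inputs, and the time range is in epoch
# milliseconds:
#
#     task = PerformanceCollectionTask()
#     status = task.collect(ctx, storage_id, args,
#                           start_time=1600000000000,
#                           end_time=1600000900000)
#     if status == TelemetryTaskStatus.TASK_EXEC_STATUS_FAILURE:
#         ...  # reschedule or mark the job failed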
import abc

import six
from oslo_log import log

from delfin import db
from delfin import exception
from delfin.common.constants import TelemetryTaskStatus
from delfin.drivers import api as driver_api
from delfin.exporter import base_exporter
from delfin.i18n import _

LOG = log.getLogger(__name__)


class TelemetryTask(object):
    @abc.abstractmethod
    def collect(self, ctx, storage_id, args, start_time, end_time):
        pass

    @abc.abstractmethod
    def remove_telemetry(self, ctx, storage_id):
        pass


class PerformanceCollectionTask(TelemetryTask):
    def __init__(self):
        self.driver_api = driver_api.API()
        self.perf_exporter = base_exporter.PerformanceExporterManager()

    def collect(self, ctx, storage_id, args, start_time, end_time):
        try:
            LOG.debug("Performance collection for storage [%s] with start "
                      "time [%s] and end time [%s]"
                      % (storage_id, start_time, end_time))
            perf_metrics = self.driver_api \
                .collect_perf_metrics(ctx, storage_id, args,
                                      start_time, end_time)

            # Fill extra labels to metric by fetching metadata
            # from resource DB
            try:
                storage_details = db.storage_get(ctx, storage_id)
                for m in perf_metrics:
                    m.labels["name"] = storage_details.name
                    m.labels["serial_number"] = storage_details.serial_number
            except exception.StorageNotFound:
                LOG.warning(f'Storage(id={storage_id}) has been removed.')
                return TelemetryTaskStatus.TASK_EXEC_STATUS_SUCCESS
            except Exception as e:
                msg = _('Failed to add extra labels to performance '
                        'metrics: {0}'.format(e))
                LOG.error(msg)
                return TelemetryTaskStatus.TASK_EXEC_STATUS_FAILURE

            # Dispatch the labelled metrics to the configured performance
            # exporters, using the request context passed into this call.
            self.perf_exporter.dispatch(ctx, perf_metrics)
            return TelemetryTaskStatus.TASK_EXEC_STATUS_SUCCESS
        except Exception as e:
            LOG.error("Failed to collect performance metrics for "
                      "storage id :{0}, reason:{1}".format(
                          storage_id, six.text_type(e)))
            return TelemetryTaskStatus.TASK_EXEC_STATUS_FAILURE


================================================
FILE: delfin/test.py
================================================
# Copyright 2020 The SODA Authors.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base classes for our unit tests.

Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
""" import fixtures from unittest import mock from oslo_concurrency import lockutils from oslo_config import cfg from oslo_config import fixture as config_fixture import oslo_messaging from oslo_messaging import conffixture as messaging_conffixture from oslo_utils import uuidutils import oslotest.base as base_test from delfin.common import config # noqa from delfin import coordination from delfin.db.sqlalchemy import api as db_api from delfin.db.sqlalchemy import models as db_models from delfin import rpc from delfin import service from delfin.tests.unit import conf_fixture, fake_notifier test_opts = [ cfg.StrOpt('sqlite_db', default='delfin.sqlite', help='The filename to use with sqlite.'), ] CONF = cfg.CONF CONF.register_opts(test_opts) _DB_CACHE = None class Database(fixtures.Fixture): def __init__(self, db_session, sql_connection): self.sql_connection = sql_connection self.engine = db_session.get_engine() self.engine.dispose() conn = self.engine.connect() db_models.BASE.metadata.create_all(self.engine) self._DB = "".join(line for line in conn.connection.iterdump()) self.engine.dispose() def setUp(self): super(Database, self).setUp() conn = self.engine.connect() conn.connection.executescript(self._DB) self.addCleanup(self.engine.dispose) class TestCase(base_test.BaseTestCase): """Test case base class for all unit tests.""" def setUp(self): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() conf_fixture.set_defaults(CONF) CONF([], default_config_files=[]) global _DB_CACHE if not _DB_CACHE: _DB_CACHE = Database( db_api, sql_connection=CONF.database.connection) self.useFixture(_DB_CACHE) self.injected = [] self._services = [] # This will be cleaned up by the NestedTempfile fixture lock_path = '/' + self.useFixture(fixtures.TempDir()).path self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(lock_path=lock_path, group='oslo_concurrency') self.fixture.config( disable_process_locking=True, group='oslo_concurrency') rpc.add_extra_exmods('delfin.tests') self.addCleanup(rpc.clear_extra_exmods) self.addCleanup(rpc.cleanup) self.messaging_conf = messaging_conffixture.ConfFixture(CONF) self.messaging_conf.transport_url = 'fake:/' self.messaging_conf.response_timeout = 15 self.useFixture(self.messaging_conf) oslo_messaging.get_notification_transport(CONF) self.override_config('driver', ['test'], group='oslo_messaging_notifications') rpc.init(CONF) fake_notifier.stub_notifier(self) # Locks must be cleaned up after tests CONF.set_override('backend_type', 'file', group='coordination') CONF.set_override('backend_server', lock_path, group='coordination') coordination.LOCK_COORDINATOR.start() self.addCleanup(coordination.LOCK_COORDINATOR.stop) def tearDown(self): """Runs after each test method to tear down test environment.""" super(TestCase, self).tearDown() # Reset any overridden flags CONF.reset() # Stop any timers for x in self.injected: try: x.stop() except AssertionError: pass # Kill any services for x in self._services: try: x.kill() except Exception: pass # Delete attributes that don't start with _ so they don't pin # memory around unnecessarily for the duration of the test # suite for key in [k for k in self.__dict__.keys() if k[0] != '_']: del self.__dict__[key] def flags(self, **kw): """Override flag variables for a test.""" for k, v in kw.items(): CONF.set_override(k, v) def start_service(self, name, host=None, **kwargs): host = host or uuidutils.generate_uuid() kwargs.setdefault('host', host) 
kwargs.setdefault('binary', 'delfin-%s' % name) svc = service.Service.create(**kwargs) svc.start() self._services.append(svc) return svc def mock_object(self, obj, attr_name, new_attr=None, **kwargs): """Use python mock to mock an object attribute Mocks the specified objects attribute with the given value. Automatically performs 'addCleanup' for the mock. """ if not new_attr: new_attr = mock.Mock() patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs) patcher.start() self.addCleanup(patcher.stop) return new_attr def mock_class(self, class_name, new_val=None, **kwargs): """Use python mock to mock a class Mocks the specified objects attribute with the given value. Automatically performs 'addCleanup' for the mock. """ if not new_val: new_val = mock.Mock() patcher = mock.patch(class_name, new_val, **kwargs) patcher.start() self.addCleanup(patcher.stop) return new_val # Useful assertions def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): """Assert two dicts are equivalent. This is a 'deep' match in the sense that it handles nested dictionaries appropriately. NOTE: If you don't care (or don't know) a given value, you can specify the string DONTCARE as the value. This will cause that dict-item to be skipped. """ def raise_assertion(msg): d1str = str(d1) d2str = str(d2) base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s ' 'd2: %(d2str)s' % {"msg": msg, "d1str": d1str, "d2str": d2str}) raise AssertionError(base_msg) d1keys = set(d1.keys()) d2keys = set(d2.keys()) if d1keys != d2keys: d1only = d1keys - d2keys d2only = d2keys - d1keys raise_assertion('Keys in d1 and not d2: %(d1only)s. ' 'Keys in d2 and not d1: %(d2only)s' % {"d1only": d1only, "d2only": d2only}) for key in d1keys: d1value = d1[key] d2value = d2[key] try: error = abs(float(d1value) - float(d2value)) within_tolerance = error <= tolerance except (ValueError, TypeError): # If both values aren't convertible to float, just ignore # ValueError if arg is a str, TypeError if it's something else # (like None) within_tolerance = False if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): self.assertDictMatch(d1value, d2value) elif 'DONTCARE' in (d1value, d2value): continue elif approx_equal and within_tolerance: continue elif d1value != d2value: raise_assertion("d1['%(key)s']=%(d1value)s != " "d2['%(key)s']=%(d2value)s" % { "key": key, "d1value": d1value, "d2value": d2value }) def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001): """Assert a list of dicts are equivalent.""" def raise_assertion(msg): L1str = str(L1) L2str = str(L2) base_msg = ('List of dictionaries do not match: %(msg)s ' 'L1: %(L1str)s L2: %(L2str)s' % {"msg": msg, "L1str": L1str, "L2str": L2str}) raise AssertionError(base_msg) L1count = len(L1) L2count = len(L2) if L1count != L2count: raise_assertion('Length mismatch: len(L1)=%(L1count)d != ' 'len(L2)=%(L2count)d' % {"L1count": L1count, "L2count": L2count}) for d1, d2 in zip(L1, L2): self.assertDictMatch(d1, d2, approx_equal=approx_equal, tolerance=tolerance) def assertSubDictMatch(self, sub_dict, super_dict): """Assert a sub_dict is subset of super_dict.""" self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys()))) for k, sub_value in sub_dict.items(): super_value = super_dict[k] if isinstance(sub_value, dict): self.assertSubDictMatch(sub_value, super_value) elif 'DONTCARE' in (sub_value, super_value): continue else: self.assertEqual(sub_value, super_value) def assertIn(self, a, b, *args, **kwargs): """Python < v2.7 compatibility. 
Assert 'a' in 'b'.""" try: f = super(TestCase, self).assertIn except AttributeError: self.assertTrue(a in b, *args, **kwargs) else: f(a, b, *args, **kwargs) def assertNotIn(self, a, b, *args, **kwargs): """Python < v2.7 compatibility. Assert 'a' NOT in 'b'.""" try: f = super(TestCase, self).assertNotIn except AttributeError: self.assertFalse(a in b, *args, **kwargs) else: f(a, b, *args, **kwargs) def assertIsInstance(self, a, b, *args, **kwargs): """Python < v2.7 compatibility.""" try: f = super(TestCase, self).assertIsInstance except AttributeError: self.assertIsInstance(a, b) else: f(a, b, *args, **kwargs) def assertIsNone(self, a, *args, **kwargs): """Python < v2.7 compatibility.""" try: f = super(TestCase, self).assertIsNone except AttributeError: self.assertTrue(a is None) else: f(a, *args, **kwargs) def _dict_from_object(self, obj, ignored_keys): if ignored_keys is None: ignored_keys = [] return {k: v for k, v in obj.items() if k not in ignored_keys} def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None): obj_to_dict = lambda o: ( # noqa: E731 self._dict_from_object(o, ignored_keys)) sort_key = lambda d: [d[k] for k in sorted(d)] # noqa: E731 conv_and_sort = lambda obj: ( # noqa: E731 sorted(map(obj_to_dict, obj), key=sort_key)) self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2)) def assert_notify_called(self, mock_notify, calls): for i in range(0, len(calls)): mock_call = mock_notify.call_args_list[i] call = calls[i] posargs = mock_call[0] self.assertEqual(call[0], posargs[0]) self.assertEqual(call[1], posargs[2]) def override_config(self, name, override, group=None): """Cleanly override CONF variables.""" CONF.set_override(name, override, group) self.addCleanup(CONF.clear_override, name, group) ================================================ FILE: delfin/tests/__init__.py ================================================ ================================================ FILE: delfin/tests/e2e/GetResources.robot ================================================ *** Settings *** Documentation Tests to verify that GET of resources Library RequestsLibrary Library Collections Library JSONLibrary Library OperatingSystem Suite Setup Open Application Suite Teardown Close Application *** Variables *** ${delfin_url} http://localhost:8190/v1 ${storage_pools} storage-pools @{res_urls} storage-pools volumes controllers disks ports quotas qtrees filesystems shares @{res_indx} storage_pools volumes controllers disks ports quotas qtrees filesystems shares *** Test Cases *** GET Resources when test storage is registered [Tags] DELFIN FOR ${res_url} ${res_ind} IN ZIP ${res_urls} ${res_indx} ${ret_json}= Get All Resource Of ${res_url} ${res_s}= Get Value From Json ${ret_json} $..${res_ind} Should Not Be Empty ${res_s[0]} END GET Resources with ID [Tags] DELFIN FOR ${res_url} ${res_ind} IN ZIP ${res_urls} ${res_indx} ${ret_json}= Get All Resource Of ${res_url} ${res_s}= Get Value From Json ${ret_json} $..${res_ind} Should Not Be Empty ${res_s[0]} ${resource_ids} Get Value From Json ${ret_json} $..id ${ret_json}= Get All Resource with ID ${res_url} ${resource_ids[0]} Should Not Be Empty ${res_s[0]} END GET Resources with Filter [Tags] DELFIN log to console \n ${storages}= Get All Storages ${storages_id}= Get Value From Json ${storages[0]} $..id FOR ${res_url} ${res_ind} IN ZIP ${res_urls} ${res_indx} ${ret_json}= Get All Resource with Filter ${res_url} storage_id=${storages_id[0]} ${res_s}= Get Value From Json ${ret_json} $..${res_ind} Should Not Be Empty ${res_s[0]} ${ret_json}= 
Get All Resource with Filter ${res_url} storage_id=123 ${res_s}= Get Value From Json ${ret_json} $..${res_ind} Should Be Empty ${res_s[0]} END GET Resources when no storages are registered [Tags] DELFIN Close Application FOR ${res_url} ${res_ind} IN ZIP ${res_urls} ${res_indx} ${ret_json}= Get All Resource Of ${res_url} ${res_s}= Get Value From Json ${ret_json} $..${res_ind} Should Be Empty ${res_s[0]} END Open Application *** Keywords *** Get All Resource Of [Arguments] ${resource} Create Session delfin ${delfin_url} ${resp_get}= GET On Session delfin ${resource} Status Should Be 200 ${resp_get} [Return] ${resp_get.json()} Get All Resource with ID [Arguments] ${resource} ${resource_id} Create Session delfin ${delfin_url} ${resp_get}= GET On Session delfin ${resource}/${resource_id} Status Should Be 200 ${resp_get} [Return] ${resp_get.json()} Get All Resource with Filter [Arguments] ${resource} ${filter} Create Session delfin ${delfin_url} ${resp_get}= GET On Session delfin ${resource}?${filter} Status Should Be 200 ${resp_get} [Return] ${resp_get.json()} Delete Storage With ID [Arguments] ${storage_id} Create Session delfin ${delfin_url} ${resp_del}= DELETE On Session delfin storages/${storage_id} Status Should Be 202 ${resp_del} Register Test Storage ${test}= Load Json From File ${CURDIR}/test.json ${access_info}= Get Value From Json ${test} $.test_register_access_info Create Session delfin ${delfin_url} ${resp_register}= POST On Session delfin storages json=${access_info[0]} Status Should Be 201 ${resp_register} Dictionary Should Contain Key ${resp_register.json()} id ${storage_id}= Get Value From Json ${resp_register.json()} $..id [Return] ${storage_id[0]} Get All Storages Create Session delfin ${delfin_url} ${resp_get}= GET On Session delfin storages Status Should Be 200 ${resp_get} ${resp_get_storage}= Get Value From Json ${resp_get.json()} $..storages [Return] ${resp_get_storage[0]} Open Application ${array_id}= Register Test Storage Sleep 10s Close Application @{storages}= Get All Storages FOR ${storage} IN @{storages} ${storage_id}= Get Value From Json ${storage} $..id Delete Storage With ID ${storage_id[0]} END Sleep 10s ================================================ FILE: delfin/tests/e2e/GetStorage.robot ================================================ *** Settings *** Documentation Tests to verify that GET of storages Library RequestsLibrary Library Collections Library JSONLibrary Library OperatingSystem *** Variables *** ${delfin_url} http://localhost:8190/v1 *** Test Cases *** GET all Storages when no storages are registered [Tags] DELFIN ${storages}= Get All Storages Should Be Empty ${storages} GET all Storages when two storages are registered [Tags] DELFIN ${storage_id_test}= Register Test Storage ${storage_id_fake}= Register Fake Storage # GET all storages ${storages}= Get All Storages ${id_list}= create list ${storages[0]['id']} ${storages[1]['id']} List should contain value ${id_list} ${storage_id_test} List should contain value ${id_list} ${storage_id_fake} Delete Storage With ID ${storage_id_test} Delete Storage With ID ${storage_id_fake} GET Storage with a valid Storage ID [Tags] DELFIN ${storage_id_test}= Register Test Storage # GET all storages ${storage}= Get Storage With ID ${storage_id_test} ${id_list}= create list ${storage['id']} List should contain value ${id_list} ${storage_id_test} Delete Storage With ID ${storage_id_test} *** Keywords *** Get All Storages Create Session delfin ${delfin_url} ${resp_get}= GET On Session delfin storages Status Should Be 200 
${resp_get}
    ${resp_get_storage}=    Get Value From Json    ${resp_get.json()}    $..storages
    [Return]    ${resp_get_storage[0]}

Get Storage With ID
    [Arguments]    ${storage_id}
    Create Session    delfin    ${delfin_url}
    ${resp_get}=    GET On Session    delfin    storages/${storage_id}
    Status Should Be    200    ${resp_get}
    [Return]    ${resp_get.json()}

Delete Storage With ID
    [Arguments]    ${storage_id}
    Create Session    delfin    ${delfin_url}
    ${resp_del}=    DELETE On Session    delfin    storages/${storage_id}
    Status Should Be    202    ${resp_del}
    Sleep    10s

Register Test Storage
    ${test}=    Load Json From File    ${CURDIR}/test.json
    ${access_info}=    Get Value From Json    ${test}    $.test_register_access_info
    Create Session    delfin    ${delfin_url}
    ${resp_register}=    POST On Session    delfin    storages    json=${access_info[0]}
    Status Should Be    201    ${resp_register}
    Dictionary Should Contain Key    ${resp_register.json()}    id
    ${storage_id}=    Get Value From Json    ${resp_register.json()}    $..id
    [Return]    ${storage_id[0]}

Register Fake Storage
    ${fake_rest}=    Create dictionary    host=10.10.10.100    port=${8080}    username=admin    password=password
    ${access_info}=    Create dictionary    vendor=fake_storage    model=fake_driver    rest=${fake_rest}
    ${fake_device}=    Create dictionary    vendor=fake_vendor    model=fake_model
    Create Session    delfin    ${delfin_url}
    ${resp_register}=    POST On Session    delfin    storages    json=${access_info}
    ${storage_id}=    Get Value From Json    ${resp_register.json()}    $..id
    Dictionary Should Contain Sub Dictionary    ${resp_register.json()}    ${fake_device}
    [Return]    ${storage_id[0]}


================================================
FILE: delfin/tests/e2e/README.md
================================================
# Introduction

This folder contains end-to-end, automated testing scripts for Delfin. These
tests use [Robot Framework](https://robotframework.org/) for automation and
report generation.

The end-to-end tests are run against a test driver provided in the path
`delfin/tests/e2e/testdriver`. This test driver uses the storage details
included in the file `delfin/tests/e2e/testdriver/storage.json` to simulate a
storage backend during testing.

# Supported OS

Ubuntu 18.04

# Prerequisite

The prerequisites for the [standalone installer](https://github.com/sodafoundation/delfin/blob/master/installer/README.md)
apply here too.

Install python 3.6+ and pip.

Export PYTHONPATH as below

```bash
export PYTHONPATH=$(pwd)
```

# Run tests

The end-to-end tests can be run from the command prompt as below

```bash
git clone https://github.com/sodafoundation/delfin.git && cd delfin
./delfin/tests/e2e/test_e2e.sh
```

The above script injects the test driver into delfin, then builds and installs
delfin using the delfin standalone installer. It runs the Robot Framework
scripts against the running delfin application to verify the delfin APIs.

When the script finishes execution, Robot Framework generates the test
execution summary and log. These are available in the delfin root directory,
with the names `report.html` and `log.html` respectively.


================================================
FILE: delfin/tests/e2e/RegisterStorage.robot
================================================
*** Settings ***
Documentation    Tests to verify that registration of storage succeeds
...              and fails correctly depending on the access_info
...              input provided.
...              Delfin needs to be installed and APIs are accessible.
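# (added note) This suite can also be run on its own once delfin is up,
# e.g. ``robot delfin/tests/e2e/RegisterStorage.robot``.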
Library RequestsLibrary Library Collections Library JSONLibrary Library OperatingSystem *** Variables *** ${delfin_url} http://localhost:8190/v1 *** Test Cases *** Register Storage with in-valid access_info Test [Tags] DELFIN Create Session delfin ${delfin_url} ${ref_input}= Load Json From File ${CURDIR}/test.json ${ref_access_info}= Get Value From Json ${ref_input} $.test_register_access_info # Invalid ip ${access_info}= Copy Dictionary ${ref_access_info[0]} Deepcopy=True Set To Dictionary ${access_info['rest']} host=10.10.10.123 ${resp_register}= POST On Session delfin storages json=${access_info} expected_status=any Status Should Be 400 ${resp_register} dictionary should contain value ${resp_register.json()} InvalidIpOrPort # Invalid port ${access_info}= Copy Dictionary ${ref_access_info[0]} Deepcopy=True Set To Dictionary ${access_info['rest']} port=${80} ${resp_register}= POST On Session delfin storages json=${access_info} expected_status=any Status Should Be 400 ${resp_register} dictionary should contain value ${resp_register.json()} InvalidIpOrPort # Invalid username ${access_info}= Copy Dictionary ${ref_access_info[0]} Deepcopy=True Set To Dictionary ${access_info['rest']} username=user ${resp_register}= POST On Session delfin storages json=${access_info} expected_status=any Status Should Be 400 ${resp_register} dictionary should contain value ${resp_register.json()} InvalidUsernameOrPassword # Invalid Password ${access_info}= Copy Dictionary ${ref_access_info[0]} Deepcopy=True Set To Dictionary ${access_info['rest']} password=pass ${resp_register}= POST On Session delfin storages json=${access_info} expected_status=any Status Should Be 400 ${resp_register} dictionary should contain value ${resp_register.json()} InvalidUsernameOrPassword Register Storage with valid access_info Test [Tags] DELFIN # Read storage backend details from JSON file ${ref_storage}= Load Json From File ${CURDIR}/testdriver/storage.json ${ref_device}= Get Value From Json ${ref_storage} $..storage ${storage_test}= Register Test Storage Dictionary Should Contain Sub Dictionary ${storage_test} ${ref_device[0]} Delete Storage With ID ${storage_test["id"]} Register Storage with same access_info Test [Tags] DELFIN Sleep 10s ${storage_test}= Register Test Storage ${test}= Load Json From File ${CURDIR}/test.json ${access_info}= Get Value From Json ${test} $.test_register_access_info Create Session delfin ${delfin_url} ${resp_register}= POST On Session delfin storages json=${access_info[0]} expected_status=any Status Should Be 400 ${resp_register} dictionary should contain value ${resp_register.json()} StorageAlreadyExists Delete Storage With ID ${storage_test["id"]} *** Keywords *** Register Test Storage ${test}= Load Json From File ${CURDIR}/test.json ${access_info}= Get Value From Json ${test} $.test_register_access_info Create Session delfin ${delfin_url} ${resp_register}= POST On Session delfin storages json=${access_info[0]} Status Should Be 201 ${resp_register} [Return] ${resp_register.json()} Delete Storage With ID [Arguments] ${storage_id} Create Session delfin ${delfin_url} ${resp_del}= DELETE On Session delfin storages/${storage_id} Status Should Be 202 ${resp_del} Sleep 10s ================================================ FILE: delfin/tests/e2e/RemoveStorage.robot ================================================ *** Settings *** Documentation Tests to verify that Delete of storage Library RequestsLibrary Library Collections Library JSONLibrary *** Variables *** ${delfin_url} http://localhost:8190/v1 *** 
Test Cases *** Delete Storage with valid storage_id [Tags] DELFIN Sleep 10s ${storage_id_test}= Register Test Storage Create Session delfin ${delfin_url} ${resp_del}= DELETE On Session delfin storages/${storage_id_test} Status Should Be 202 ${resp_del} Delete Storage with in-valid storage_id [Tags] DELFIN Create Session delfin ${delfin_url} ${resp_del2}= DELETE On Session delfin storages/111 404 ${error_code}= Get Value From Json ${resp_del2.json()} $..error_code dictionary should contain value ${resp_del2.json()} StorageNotFound *** Keywords *** Register Test Storage ${test}= Load Json From File ${CURDIR}/test.json ${access_info}= Get Value From Json ${test} $.test_register_access_info Create Session delfin ${delfin_url} ${resp_register}= POST On Session delfin storages json=${access_info[0]} Status Should Be 201 ${resp_register} Dictionary Should Contain Key ${resp_register.json()} id ${storage_id}= Get Value From Json ${resp_register.json()} $..id [Return] ${storage_id[0]} ================================================ FILE: delfin/tests/e2e/UpdateAccessInfo.robot ================================================ *** Settings *** Documentation Tests to verify that GET of resources Library RequestsLibrary Library Collections Library JSONLibrary Library OperatingSystem Suite Setup Open Application Suite Teardown Close Application *** Variables *** ${delfin_url} http://localhost:8190/v1 *** Test Cases *** Update with invalid access_info Test [Tags] DELFIN @{storages}= Get All Storages ${storage_id}= Get Value From Json ${storages[0]} $..id # Invalid access_info vendor and model ${access_info_rest}= Create dictionary host=10.10.10.10 port=${8080} username=user_1 password=pass_1 ${access_info}= Create dictionary vendor=test_vendor model=test_model rest=${access_info_rest} ${resp}= Update Access Info ${storage_id[0]} ${access_info} Status Should Be 400 ${resp} dictionary should contain value ${resp.json()} InvalidInput # Invalid access_info, ip address ${access_info_rest}= Create dictionary host=100.10.10.10 port=${8080} username=user_1 password=pass_1 ${access_info}= Create dictionary rest=${access_info_rest} ${resp}= Update Access Info ${storage_id[0]} ${access_info} Status Should Be 400 ${resp} dictionary should contain value ${resp.json()} InvalidIpOrPort # Invalid access_info, port ${access_info_rest}= Create dictionary host=10.10.10.10 port=${80} username=user_1 password=pass_1 ${access_info}= Create dictionary rest=${access_info_rest} ${resp}= Update Access Info ${storage_id[0]} ${access_info} Status Should Be 400 ${resp} dictionary should contain value ${resp.json()} InvalidIpOrPort # Invalid access_info, username ${access_info_rest}= Create dictionary host=10.10.10.10 port=${8080} username=user password=pass_1 ${access_info}= Create dictionary rest=${access_info_rest} ${resp}= Update Access Info ${storage_id[0]} ${access_info} Status Should Be 400 ${resp} dictionary should contain value ${resp.json()} InvalidUsernameOrPassword # Invalid access_info, password ${access_info_rest}= Create dictionary host=10.10.10.10 port=${8080} username=user_1 password=pass ${access_info}= Create dictionary rest=${access_info_rest} ${resp}= Update Access Info ${storage_id[0]} ${access_info} Status Should Be 400 ${resp} dictionary should contain value ${resp.json()} InvalidUsernameOrPassword # Invalid storage_id ${access_info_rest}= Create dictionary host=10.10.10.10 port=${8080} username=user_1 password=pass_1 ${access_info}= Create dictionary rest=${access_info_rest} ${resp}= Update Access Info 123 
${access_info} Status Should Be 404 ${resp} dictionary should contain value ${resp.json()} AccessInfoNotFound Update with valid access_info Test [Tags] DELFIN @{storages}= Get All Storages ${storage_id}= Get Value From Json ${storages[0]} $..id # Valid access info and storage_id ${access_info_rest}= Create dictionary host=10.10.10.10 port=${8080} username=user_1 password=pass_1 ${access_info}= Create dictionary rest=${access_info_rest} ${resp}= Update Access Info ${storage_id[0]} ${access_info} Status Should Be 200 ${resp} dictionary should contain value ${resp.json()} test_vendor dictionary should contain value ${resp.json()} test_model dictionary should contain value ${resp.json()} ${storage_id[0]} *** Keywords *** Update Access Info [Arguments] ${storage_id} ${access_info} Create Session delfin ${delfin_url} ${resp_update}= PUT On Session delfin storages/${storage_id}/access-info json=${access_info} expected_status=any [Return] ${resp_update} Register Test Storage ${test}= Load Json From File ${CURDIR}/test.json ${access_info}= Get Value From Json ${test} $.test_register_access_info Create Session delfin ${delfin_url} ${resp_register}= POST On Session delfin storages json=${access_info[0]} Status Should Be 201 ${resp_register} Dictionary Should Contain Key ${resp_register.json()} id ${storage_id}= Get Value From Json ${resp_register.json()} $..id [Return] ${storage_id[0]} Delete Storage With ID [Arguments] ${storage_id} Create Session delfin ${delfin_url} ${resp_del}= DELETE On Session delfin storages/${storage_id} Status Should Be 202 ${resp_del} Sleep 10s Get All Storages Create Session delfin ${delfin_url} ${resp_get}= GET On Session delfin storages Status Should Be 200 ${resp_get} ${resp_get_storage}= Get Value From Json ${resp_get.json()} $..storages [Return] ${resp_get_storage[0]} Close Application @{storages}= Get All Storages FOR ${storage} IN @{storages} ${storage_id}= Get Value From Json ${storage} $..id Delete Storage With ID ${storage_id[0]} END Sleep 10s Open Application ${array_id}= Register Test Storage Sleep 10s ================================================ FILE: delfin/tests/e2e/__init__.py ================================================ ================================================ FILE: delfin/tests/e2e/test.json ================================================ { "test_register_access_info": { "vendor": "test_vendor", "model": "test_model", "rest": { "host": "10.10.10.10", "port": 8080, "username": "user_1", "password": "pass_1" }, "extra_attributes": { "path": "storage.json" } } } ================================================ FILE: delfin/tests/e2e/test_e2e.sh ================================================ #!/bin/bash # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. TOP_DIR=$(cd $(dirname "$0") && pwd) DELFIN_DIR=$(cd $TOP_DIR/../../.. 
&& pwd)
cd $DELFIN_DIR

ps -ef | grep 'cmd/api.py' | grep -v grep | awk '{print $2}' | xargs kill -9
ps -ef | grep 'cmd/task.py' | grep -v grep | awk '{print $2}' | xargs kill -9
ps -ef | grep 'cmd/alert.py' | grep -v grep | awk '{print $2}' | xargs kill -9
ps -ef | grep 'exporter_server.py' | grep -v grep | awk '{print $2}' | xargs kill -9

# Update setup.py to inject test driver
cp setup.py setup.py.orig
str="\ \ \ \ \ \ \ \ \ \ \ \ 'test_vendor test_model = delfin.tests.e2e.testdriver:TestDriver',"
sed -i "/FakeStorageDriver',/ a $str" $DELFIN_DIR/setup.py

installer/install
source installer/delfin/bin/activate

pip install robotframework
pip install robotframework-requests
pip install robotframework-jsonlibrary

ORIG_PATH='"storage.json"'
FILE_PATH="${TOP_DIR}/testdriver/storage.json"
sed -i "s|${ORIG_PATH}|\"${FILE_PATH}\"|g" $TOP_DIR/test.json

sleep 10
robot delfin/tests/e2e

deactivate
mv setup.py.orig setup.py
echo "Test completed successfully ..."


================================================
FILE: delfin/tests/e2e/testdriver/__init__.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
import datetime
import json
import time

from oslo_log import log

from delfin import exception
from delfin.common import constants
from delfin.common.constants import ResourceType, StorageMetric
from delfin.drivers import driver
from delfin import cryptor

LOG = log.getLogger(__name__)

MIN_STORAGE, MAX_STORAGE = 1, 10
MIN_PERF_VALUES, MAX_PERF_VALUES = 1, 4


class TestDriver(driver.StorageDriver):
    """TestDriver shows how to implement the StorageDriver; it also plays
    the role of a faker, faking data for clients under test.
""" def __init__(self, **kwargs): super().__init__(**kwargs) access_info = kwargs if access_info is None: raise exception.InvalidInput('Input access_info is missing') self.array_json = access_info.get("extra_attributes").get("path") with open(self.array_json) as f: data = json.load(f) # Verify Host & Port f_host = data.get("access_info").get("rest").get("host") f_port = data.get("access_info").get("rest").get("port") f_user = data.get("access_info").get("rest").get("username") f_pass = data.get("access_info").get("rest").get("password") a_host = access_info.get("rest").get("host") a_port = access_info.get("rest").get("port") a_user = access_info.get("rest").get("username") a_pass = access_info.get("rest").get("password") a_pass = cryptor.decode(a_pass) if f_host != a_host: raise exception.InvalidIpOrPort if f_port != a_port: raise exception.InvalidIpOrPort if f_user != a_user: raise exception.InvalidUsernameOrPassword if f_pass != a_pass: raise exception.InvalidUsernameOrPassword def reset_connection(self, context, **kwargs): pass def get_storage(self, context): with open(self.array_json) as f: data = json.load(f) return data.get('storage') def _return_json(self, key): with open(self.array_json) as f: data = json.load(f) values = data.get(key) for value in values: value['storage_id'] = self.storage_id return values def list_storage_pools(self, ctx): return self._return_json('storage_pools') def list_volumes(self, ctx): return self._return_json('volumes') def list_controllers(self, ctx): return self._return_json('controllers') def list_ports(self, ctx): return self._return_json('ports') def list_disks(self, ctx): return self._return_json('disks') def list_quotas(self, ctx): return self._return_json('quotas') def list_filesystems(self, ctx): return self._return_json('filesystems') def list_qtrees(self, ctx): return self._return_json('qtrees') def list_shares(self, ctx): return self._return_json('shares') def add_trap_config(self, context, trap_config): pass def remove_trap_config(self, context, trap_config): pass @staticmethod def parse_alert(context, alert): pass def clear_alert(self, context, alert): pass def list_alerts(self, context, query_para=None): alert_list = [{ "storage_id": self.storage_id, 'alert_id': str(random.randint(1111111, 9999999)), 'sequence_number': 100, 'alert_name': 'SNMP connect failed', 'category': 'Fault', 'severity': 'Major', 'type': 'OperationalViolation', 'location': 'NetworkEntity=entity1', 'description': "SNMP connection to the storage failed.", 'recovery_advice': "Check snmp configurations.", 'occur_time': int(time.time()) }, { "storage_id": self.storage_id, 'alert_id': str(random.randint(1111111, 9999999)), 'sequence_number': 101, 'alert_name': 'Link state down', 'category': 'Fault', 'severity': 'Critical', 'type': 'CommunicationsAlarm', 'location': 'NetworkEntity=entity2', 'description': "Backend link has gone down", 'recovery_advice': "Recheck the network configuration setting.", 'occur_time': int(time.time()) }, { "storage_id": self.storage_id, 'alert_id': str(random.randint(1111111, 9999999)), 'sequence_number': 102, 'alert_name': 'Power failure', 'category': 'Fault', 'severity': 'Fatal', 'type': 'OperationalViolation', 'location': 'NetworkEntity=entity3', 'description': "Power failure occurred. 
", 'recovery_advice': "Investigate power connection.", 'occur_time': int(time.time()) }, { "storage_id": self.storage_id, 'alert_id': str(random.randint(1111111, 9999999)), 'sequence_number': 103, 'alert_name': 'Communication failure', 'category': 'Fault', 'severity': 'Critical', 'type': 'CommunicationsAlarm', 'location': 'NetworkEntity=network1', 'description': "Communication link gone down", 'recovery_advice': "Consult network administrator", 'occur_time': int(time.time()) }] return alert_list def _get_volume_range(self, start, end): volume_list = [] for i in range(start, end): total, used, free = self._get_random_capacity() v = { "name": "fake_vol_" + str(i), "storage_id": self.storage_id, "description": "Fake Volume", "status": "normal", "native_volume_id": "fake_original_id_" + str(i), "wwn": "fake_wwn_" + str(i), "total_capacity": total, "used_capacity": used, "free_capacity": free, } volume_list.append(v) return volume_list def _get_random_performance(self): def get_random_timestamp_value(): rtv = {} for i in range(MIN_PERF_VALUES, MAX_PERF_VALUES): timestamp = int(float(datetime.datetime.now().timestamp() ) * 1000) rtv[timestamp] = random.uniform(1, 100) return rtv # The sample performance_params after filling looks like, # performance_params = {timestamp1: value1, timestamp2: value2} performance_params = {} for key in constants.DELFIN_ARRAY_METRICS: performance_params[key] = get_random_timestamp_value() return performance_params def collect_array_metrics(self, ctx, storage_id, interval, is_history): rd_array_count = random.randint(MIN_STORAGE, MAX_STORAGE) LOG.info("Fake_array_metrics number for %s: %d" % ( storage_id, rd_array_count)) array_metrics = [] labels = {'storage_id': storage_id, 'resource_type': 'array'} fake_metrics = self._get_random_performance() for _ in range(rd_array_count): for key in constants.DELFIN_ARRAY_METRICS: m = constants.metric_struct(name=key, labels=labels, values=fake_metrics[key]) array_metrics.append(m) return array_metrics @staticmethod def get_capabilities(context, filters=None): """Get capability of supported driver.""" return { 'is_historic': False, 'resource_metrics': { ResourceType.STORAGE: { StorageMetric.THROUGHPUT.name: { "unit": StorageMetric.THROUGHPUT.unit, "description": StorageMetric.THROUGHPUT.description }, StorageMetric.RESPONSE_TIME.name: { "unit": StorageMetric.RESPONSE_TIME.unit, "description": StorageMetric.RESPONSE_TIME.description }, StorageMetric.READ_RESPONSE_TIME.name: { "unit": StorageMetric.READ_RESPONSE_TIME.unit, "description": StorageMetric.READ_RESPONSE_TIME.description }, StorageMetric.WRITE_RESPONSE_TIME.name: { "unit": StorageMetric.WRITE_RESPONSE_TIME.unit, "description": StorageMetric.WRITE_RESPONSE_TIME.description }, StorageMetric.IOPS.name: { "unit": StorageMetric.IOPS.unit, "description": StorageMetric.IOPS.description }, StorageMetric.READ_THROUGHPUT.name: { "unit": StorageMetric.READ_THROUGHPUT.unit, "description": StorageMetric.READ_THROUGHPUT.description }, StorageMetric.WRITE_THROUGHPUT.name: { "unit": StorageMetric.WRITE_THROUGHPUT.unit, "description": StorageMetric.WRITE_THROUGHPUT.description }, StorageMetric.READ_IOPS.name: { "unit": StorageMetric.READ_IOPS.unit, "description": StorageMetric.READ_IOPS.description }, StorageMetric.WRITE_IOPS.name: { "unit": StorageMetric.WRITE_IOPS.unit, "description": StorageMetric.WRITE_IOPS.description }, } } } ================================================ FILE: delfin/tests/e2e/testdriver/storage.json ================================================ { 
"access_info": { "vendor":"test_vendor", "model":"test_model", "rest": { "host": "10.10.10.10", "port": 8080, "username": "user_1", "password": "pass_1" }, "extra_attributes": { "array_json": null, "return_exception": null } }, "storage": { "name":"test_storage", "description": "Test storage array", "location": "Test location", "status": "normal", "vendor": "test_vendor", "model": "test_model", "serial_number": "Serial_123_ABC", "firmware_version": "1.10.100", "total_capacity": 1000000, "used_capacity": 750000, "free_capacity": 250000, "raw_capacity": 2000000, "subscribed_capacity": 1500000 }, "storage_pools": [ { "name": "test_pool_1", "storage_id": null, "native_storage_pool_id": "test_native_pool_id_1", "description": "Test Storage Pool", "status": "normal", "storage_type": "block", "total_capacity": 10000, "used_capacity": 7500, "free_capacity": 2500 }, { "name": "test_pool_2", "storage_id": null, "native_storage_pool_id": "test_native_pool_id_2", "description": "Test Storage Pool", "status": "normal", "total_capacity": 50000, "used_capacity": 57500, "free_capacity": 2500 } ], "volumes": [ { "name": "test_volume_1", "storage_id": null, "native_volume_id": "test_native_volume_id_1", "native_storage_pool_id": "test_native_pool_id_1", "description": "Test Storage Pool", "status": "normal", "total_capacity": 10000, "used_capacity": 7500, "free_capacity": 2500 }, { "name": "test_volume_2", "storage_id": null, "native_volume_id": "test_native_volume_id_2", "native_storage_pool_id": "test_native_pool_id_2", "description": "Test Storage Pool", "status": "normal", "total_capacity": 50000, "used_capacity": 57500, "free_capacity": 2500 } ], "controllers": [ { "name": "test_controller_1", "storage_id": null, "native_controller_id": "test_native_ctrl_id_1", "location": "location_controller_1", "status": "normal", "memory_size": 10000, "cpu_info": "Intel Xenon", "soft_version": "ver_100" }, { "name": "test_controller_2", "storage_id": null, "native_controller_id": "test_native_ctrl_id_2", "location": "location_controller_2", "status": "normal", "memory_size": 10000, "cpu_info": "ARM x64", "soft_version": "ver_200" } ], "ports": [ { "name": "test_port_1", "storage_id": null, "native_port_id": "test_native_port_id_1", "location": "location_port_1", "connection_status": "connected", "health_status": "normal", "type": "fc", "logical_type": "location_port_1", "speed": 100, "max_speed": 1000, "native_parent_id": "test_ports_parent_id_1", "wwn": "wwn_10000", "mac_address": "mac_abcd", "ipv4": "0.0.0.0", "ipv4_mask": "255.255.255.0", "ipv6": "0", "ipv6_mask": "::" }, { "name": "test_port_2", "storage_id": null, "native_port_id": "test_native_port_id_2", "location": "location_port_2", "connection_status": "connected", "health_status": "normal", "type": "fc", "logical_type": "location_port_2", "speed": 100, "max_speed": 1000, "native_parent_id": "test_ports_parent_id_2", "wwn": "wwn_20000", "mac_address": "mac_abcd", "ipv4": "100.0.0.0", "ipv4_mask": "255.255.255.0", "ipv6": "0", "ipv6_mask": "::" } ], "disks": [ { "name": "test_disk_id_1", "storage_id": null, "native_disk_id": "test_native_disk_id_2", "serial_number": "serial_1000", "manufacturer": "Crucial", "model": "model_SSD3D", "firmware": "firmware_123", "speed": 1000, "capacity": 10000, "status": "normal", "physical_type": "ssd", "logical_type": "free", "health_score": 75, "native_diskgroup_id": "test_dg_id_100", "location": "location_disk_1" }, { "name": "test_disk_id_1", "storage_id": null, "native_disk_id": "test_native_disk_id_2", "serial_number": 
"serial_1000", "manufacturer": "Crucial", "model": "model_SSD3D", "firmware": "firmware_123", "speed": 1000, "capacity": 10000, "status": "normal", "physical_type": "ssd", "logical_type": "free", "health_score": 75, "native_diskgroup_id": "test_dg_id_100", "location": "location_disk_1" } ], "quotas": [ { "native_quota_id": "test_quota_id_1", "type": "tree", "storage_id": null, "native_filesystem_id": "test_native_filesystem_id_1", "native_qtree_id": "test_native_qtree_id_1", "capacity_hard_limit": 10000, "capacity_soft_limit": 9000, "file_hard_limit": 1000, "file_soft_limit": 900, "file_count": 500, "used_capacity": 5000, "user_group_name": "usr0" }, { "native_quota_id": "test_quota_id_2", "type": "group", "storage_id": null, "native_filesystem_id": "test_native_filesystem_id_2", "native_qtree_id": "test_native_qtree_id_2", "capacity_hard_limit": 20000, "capacity_soft_limit": 9000, "file_hard_limit": 2000, "file_soft_limit": 900, "file_count": 700, "used_capacity": 7000, "user_group_name": "grp0" } ], "filesystems": [ { "name": "test_filesystem_1", "storage_id": null, "native_filesystem_id": "test_native_fs_id_1", "native_pool_id": "test_native_pool_id_1", "status": "normal", "type": "thin", "security_mode": "ntfs", "total_capacity": 10000, "used_capacity": 9000, "free_capacity": 1000, "worm": "non_worm", "deduplicated": true, "compressed": true }, { "name": "test_filesystem_2", "storage_id": null, "native_filesystem_id": "test_native_fs_id_2", "native_pool_id": "test_native_pool_id_2", "status": "faulty", "type": "thick", "security_mode": "unix", "total_capacity": 10000, "used_capacity": 9000, "free_capacity": 1000, "worm": "non_worm", "deduplicated": true, "compressed": true } ], "qtrees": [ { "name": "test_qtree_1", "storage_id": null, "native_qtree_id": "test_native_qtree_id_1", "native_filesystem_id": "test_native_filesystem_id_1", "security_mode": "ntfs", "path": "/root/qtree_1" }, { "name": "test_qtree_2", "storage_id": null, "native_qtree_id": "test_native_qtree_id_2", "native_filesystem_id": "test_native_filesystem_id_2", "security_mode": "unix", "path": "/root/qtree_2" } ], "shares": [ { "name": "test_share_1", "storage_id": null, "native_share_id": "test_native_share_id_1", "native_filesystem_id": "test_native_fs_id_1", "native_qtree_id": "test_native_qtree_id_1", "protocol": "cifs", "path": "/root/share_1" }, { "name": "test_share_2", "storage_id": null, "native_share_id": "test_native_share_id_2", "native_filesystem_id": "test_native_fs_id_2", "native_qtree_id": "test_native_qtree_id_2", "protocol": "cifs", "path": "/root/share_2" } ] } ================================================ FILE: delfin/tests/unit/__init__.py ================================================ ================================================ FILE: delfin/tests/unit/alert_manager/__init__.py ================================================ ================================================ FILE: delfin/tests/unit/alert_manager/fakes.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import six from retrying import Retrying from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher from delfin import exception from delfin.common import constants def fake_storage_info(): return { 'id': 'abcd-1234-56789', 'name': 'storage1', 'vendor': 'fake vendor', 'model': 'fake model', 'serial_number': 'serial-1234', } def fake_alert_model(): return {'alert_id': '1050', 'alert_name': 'SAMPLE_ALERT_NAME', 'severity': constants.Severity.WARNING, 'category': constants.Category.NOT_SPECIFIED, 'type': constants.EventType.EQUIPMENT_ALARM, 'sequence_number': 79, 'description': 'Diagnostic event trace triggered.', 'recovery_advice': 'NA', 'resource_type': constants.DEFAULT_RESOURCE_TYPE, 'location': 'Array id=000192601409,Component type=location1 ' 'Group,Component name=comp1,Event source=symmetrix', } def fake_v3_alert_source(): return {'storage_id': 'abcd-1234-5678', 'version': 'snmpv3', 'engine_id': '800000d30300000e112245', 'username': 'test1', 'auth_key': 'YWJjZDEyMzQ1Njc=', 'auth_protocol': 'HMACMD5', 'privacy_key': 'YWJjZDEyMzQ1Njc=', 'privacy_protocol': 'DES', 'host': '127.0.0.1' } def fake_v3_alert_source_list_with_one(): return [ {'storage_id': 'abcd-1234-5678', 'version': 'snmpv3', 'engine_id': '800000d30300000e112245', 'username': 'test1', 'auth_key': 'YWJjZDEyMzQ1Njc=', 'auth_protocol': 'HMACMD5', 'privacy_key': 'YWJjZDEyMzQ1Njc=', 'privacy_protocol': 'DES' } ] def null_alert_source_list(): return [] def fake_v3_alert_source_list(): return [ {'storage_id': 'abcd-1234-5678', 'version': 'snmpv3', 'engine_id': '800000d30300000e112245', 'username': 'test1', 'auth_key': 'YWJjZDEyMzQ1Njc=', 'auth_protocol': 'HMACMD5', 'privacy_key': 'YWJjZDEyMzQ1Njc=', 'privacy_protocol': 'DES' }, {'storage_id': 'abcd-1234-5677', 'version': 'snmpv3', 'engine_id': '800000d30300000e112246', 'username': 'test2', 'auth_key': 'YWJjZDEyMzQ1Njc=', 'auth_protocol': 'HMACMD5', 'privacy_key': 'YWJjZDEyMzQ1Njc=', 'privacy_protocol': 'DES' } ] def parse_alert_exception(): raise exception.InvalidResults("parse alert failed.") def load_config_exception(para): raise exception.InvalidResults("load config failed.") def mock_add_transport(snmpEngine, transportDomain, transport): snmpEngine.transportDispatcher = AsyncoreDispatcher() def config_delv3_exception(snmp_engine, username, securityEngineId): raise exception.InvalidResults("Config delete failed.") def mock_cmdgen_get_cmd(self, authData, transportTarget, *varNames, **kwargs): self.snmpEngine.transportDispatcher = AsyncoreDispatcher() return None, None, None, None def fake_v2_alert_source(): return {'storage_id': 'abcd-1234-5678', 'version': 'snmpv2c', 'community_string': 'YWJjZDEyMzQ1Njc=', } def fake_retry(*dargs, **dkw): """ Decorator function that instantiates the Retrying object @param *dargs: positional arguments passed to Retrying object @param **dkw: keyword arguments passed to the Retrying object """ if dkw.get('stop_max_attempt_number'): dkw['stop_max_attempt_number'] = 1 # support both @retry and @retry() as valid syntax if len(dargs) == 1 and callable(dargs[0]): def wrap_simple(f): @six.wraps(f) def wrapped_f(*args, **kw): return Retrying().call(f, *args, **kw) return wrapped_f return wrap_simple(dargs[0]) else: def wrap(f): @six.wraps(f) def wrapped_f(*args, **kw): return Retrying(*dargs, **dkw).call(f, *args, **kw) return wrapped_f return wrap FAKE_STOTRAGE = { 'id': 1, 'name': 'fake_storage', 'vendor': 'fake_vendor', 'model': 'fake_model', 
'serial_number': '12345678', } ================================================ FILE: delfin/tests/unit/alert_manager/test_alert_processor.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from unittest import mock from oslo_utils import importutils from delfin import context from delfin import exception from delfin.common import constants from delfin.tests.unit.alert_manager import fakes class AlertProcessorTestCase(unittest.TestCase): ALERT_PROCESSOR_CLASS = 'delfin.alert_manager.alert_processor' \ '.AlertProcessor' @mock.patch('delfin.task_manager.rpcapi.TaskAPI', mock.Mock()) def _get_alert_processor(self): alert_processor_class = importutils.import_class( self.ALERT_PROCESSOR_CLASS) alert_processor = alert_processor_class() return alert_processor @mock.patch('delfin.db.storage_get') @mock.patch('delfin.drivers.api.API.parse_alert') @mock.patch('delfin.exporter.base_exporter' '.AlertExporterManager.dispatch') @mock.patch('delfin.context.get_admin_context') def test_process_alert_info_success(self, mock_ctxt, mock_export_model, mock_parse_alert, mock_storage): fake_storage_info = fakes.fake_storage_info() input_alert = {'storage_id': 'abcd-1234-56789', 'connUnitEventId': 79, 'connUnitName': '000192601409', 'connUnitEventType': constants.EventType.EQUIPMENT_ALARM, 'connUnitEventDescr': 'Diagnostic ' 'event trace triggered.', 'connUnitEventSeverity': 'warning', 'connUnitType': 'storage-subsystem', 'asyncEventSource': 'eventsource1', 'asyncEventCode': '1050', 'asyncEventComponentType': '1051', 'asyncEventComponentName': 'comp1'} expected_alert_model = {'storage_id': fake_storage_info['id'], 'storage_name': fake_storage_info['name'], 'vendor': fake_storage_info['vendor'], 'model': fake_storage_info['model'], 'serial_number': fake_storage_info['serial_number'], 'location': 'Array id=000192601409,Component ' 'type=location1 ' 'Group,Component name=comp1,Event ' 'source=symmetrix', 'type': input_alert['connUnitEventType'], 'severity': constants.Severity.WARNING, 'category': constants.Category.NOT_SPECIFIED, 'description': input_alert['connUnitEventDescr'], 'resource_type': constants.DEFAULT_RESOURCE_TYPE, 'alert_id': input_alert['asyncEventCode'], 'alert_name': 'SAMPLE_ALERT_NAME', 'sequence_number': 79, 'recovery_advice': 'NA' } mock_storage.return_value = fake_storage_info expected_ctxt = context.get_admin_context() mock_ctxt.return_value = expected_ctxt mock_parse_alert.return_value = fakes.fake_alert_model() alert_processor_inst = self._get_alert_processor() alert_processor_inst.process_alert_info(input_alert) # Verify that model returned by driver is exported mock_export_model.assert_called_once_with(expected_ctxt, [expected_alert_model]) @mock.patch('delfin.db.storage_get') @mock.patch('delfin.drivers.api.API.parse_alert', fakes.parse_alert_exception) def test_process_alert_info_exception(self, mock_storage): """ Mock parse alert for raising exception""" alert = {'storage_id': 'abcd-1234-56789', 
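# NOTE(editor): only storage-identification fields are supplied here; the
# trap payload itself does not matter because parse_alert is patched above
# with fakes.parse_alert_exception, which raises InvalidResults. The
# assertion below only checks that the processor wraps that failure into
# its own InvalidResults("Failed to fill the alert model from driver.").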
'storage_name': 'storage1', 'vendor': 'fake vendor', 'model': 'fake mode', 'serial_number': 'serial-1234'} mock_storage.return_value = fakes.fake_storage_info() alert_processor_inst = self._get_alert_processor() self.assertRaisesRegex(exception.InvalidResults, "Failed to fill the alert model from driver.", alert_processor_inst.process_alert_info, alert) @mock.patch('delfin.context.get_admin_context') @mock.patch('delfin.db.storage_get') @mock.patch('delfin.drivers.api.API.parse_alert') @mock.patch('delfin.alert_manager.alert_processor.' 'AlertProcessor.sync_storage_alert') def test_process_alert_info_incompletetrap_exception(self, mock_sync_alert, mock_parse_alert, mock_storage, mock_ctxt): """ Mock parse alert for raising exception""" alert = {'storage_id': 'abcd-1234-56789', 'storage_name': 'storage1', 'vendor': 'fake vendor', 'model': 'fake mode', 'serial_number': 'serial-1234'} mock_ctxt.return_value = context.get_admin_context() mock_storage.return_value = fakes.fake_storage_info() mock_parse_alert.side_effect = exception.IncompleteTrapInformation( 'abcd-1234-56789') alert_processor_inst = self._get_alert_processor() alert_processor_inst.process_alert_info(alert) self.assertTrue(mock_sync_alert.called) ================================================ FILE: delfin/tests/unit/alert_manager/test_snmp_validator.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random from unittest import mock from pysnmp.entity.rfc3413.oneliner import cmdgen from delfin import context from delfin import db from delfin import test from delfin.alert_manager import snmp_validator from delfin.common import constants from delfin.exporter import base_exporter from delfin.tests.unit.alert_manager import fakes class TestSNMPValidator(test.TestCase): @mock.patch.object(db, 'alert_source_update', mock.Mock()) @mock.patch('delfin.alert_manager.snmp_validator.' 
'SNMPValidator.validate_connectivity') def test_validate(self, mock_validate_connectivity): validator = snmp_validator.SNMPValidator() mock_validate_connectivity.return_value = fakes.fake_v3_alert_source() v3_alert_source_without_engine_id = fakes.fake_v3_alert_source() v3_alert_source_without_engine_id.pop('engine_id') validator.validate(context, v3_alert_source_without_engine_id) self.assertEqual(db.alert_source_update.call_count, 1) mock_validate_connectivity.return_value = fakes.fake_v3_alert_source() validator.validate(context, fakes.fake_v3_alert_source()) self.assertEqual(db.alert_source_update.call_count, 1) @mock.patch.object(cmdgen.UdpTransportTarget, '_resolveAddr', mock.Mock()) @mock.patch.object(cmdgen.UdpTransportTarget, 'setLocalAddress', mock.Mock()) @mock.patch.object(cmdgen.CommandGenerator, 'getCmd', fakes.mock_cmdgen_get_cmd) @mock.patch('delfin.db.access_info_get') @mock.patch('pysnmp.entity.observer.MetaObserver.registerObserver') @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher' '.closeDispatcher') def test_validate_connectivity(self, mock_close_dispatcher, mock_register_observer, mock_access_info_get): # Get a random host a = random.randint(0, 255) b = random.randint(0, 255) c = random.randint(0, 255) d = random.randint(0, 255) host = str(a) + '.' + str(b) + '.' + str(c) + '.' + str(d) # Get a random port port = random.randint(1024, 65535) # snmpv3 v3_alert_source = fakes.fake_v3_alert_source() v3_alert_source['host'] = host v3_alert_source['port'] = port mock_access_info_get.return_value = {'model': 'vsp'} snmp_validator.SNMPValidator.validate_connectivity( context.RequestContext(), v3_alert_source) self.assertEqual(mock_close_dispatcher.call_count, 1) self.assertEqual(mock_register_observer.call_count, 1) # snmpv2c v2_alert_source = fakes.fake_v2_alert_source() v2_alert_source['host'] = host v2_alert_source['port'] = port snmp_validator.SNMPValidator.validate_connectivity( context.RequestContext(), v2_alert_source) self.assertEqual(mock_close_dispatcher.call_count, 2) self.assertEqual(mock_register_observer.call_count, 1) @mock.patch.object(db, 'storage_get', mock.Mock(return_value=fakes.FAKE_STOTRAGE)) @mock.patch.object(snmp_validator.SNMPValidator, '_dispatch_snmp_validation_alert', mock.Mock()) def test_handle_validation_result(self): validator = snmp_validator.SNMPValidator() validator._handle_validation_result( context, fakes.FAKE_STOTRAGE['id'], constants.Category.FAULT) snmp_validator.SNMPValidator._dispatch_snmp_validation_alert \ .assert_called_with(context, fakes.FAKE_STOTRAGE, constants.Category.FAULT) validator._handle_validation_result( context, fakes.FAKE_STOTRAGE['id'], constants.Category.RECOVERY) snmp_validator.SNMPValidator._dispatch_snmp_validation_alert \ .assert_called_with(context, fakes.FAKE_STOTRAGE, constants.Category.RECOVERY) @mock.patch.object(base_exporter.AlertExporterManager, 'dispatch', mock.Mock()) def test_dispatch_snmp_validation_alert(self): validator = snmp_validator.SNMPValidator() storage = fakes.FAKE_STOTRAGE alert = { 'storage_id': storage['id'], 'storage_name': storage['name'], 'vendor': storage['vendor'], 'model': storage['model'], 'serial_number': storage['serial_number'], 'alert_id': constants.SNMP_CONNECTION_FAILED_ALERT_ID, 'sequence_number': 0, 'alert_name': 'SNMP connect failed', 'category': constants.Category.FAULT, 'severity': constants.Severity.MAJOR, 'type': constants.EventType.COMMUNICATIONS_ALARM, 'location': 'NetworkEntity=%s' % storage['name'], 'description': "SNMP connection to the 
storage failed. " "SNMP traps from storage will not be received.", 'recovery_advice': "1. The network connection is abnormal. " "2. SNMP authentication parameters " "are invalid.", 'occur_time': mock.ANY, } validator._dispatch_snmp_validation_alert( context, storage, constants.Category.FAULT) base_exporter.AlertExporterManager(). \ dispatch.assert_called_once_with(context, alert) ================================================ FILE: delfin/tests/unit/alert_manager/test_trap_receiver.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock import retrying from oslo_utils import importutils from pysnmp.carrier.asyncore.dgram import udp from pysnmp.entity import engine, config from delfin import exception from delfin import test from delfin.tests.unit.alert_manager import fakes retrying.retry = fakes.fake_retry class TrapReceiverTestCase(test.TestCase): TRAP_RECEIVER_CLASS = 'delfin.alert_manager.trap_receiver' \ '.TrapReceiver' DEF_TRAP_RECV_ADDR = '127.0.0.1' DEF_TRAP_RECV_PORT = '162' def setUp(self): super(TrapReceiverTestCase, self).setUp() self.alert_rpc_api = mock.Mock() trap_receiver_class = importutils.import_class( self.TRAP_RECEIVER_CLASS) self.trap_receiver = trap_receiver_class(self.DEF_TRAP_RECV_ADDR, self.DEF_TRAP_RECV_PORT) self.mock_object(self.trap_receiver, 'alert_rpc_api', self.alert_rpc_api) def _get_trap_receiver(self): return self.trap_receiver @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher' '.jobStarted') @mock.patch('delfin.db.api.alert_source_get_all') @mock.patch('pysnmp.carrier.asyncore.dgram.udp.UdpTransport' '.openServerMode', mock.Mock()) @mock.patch('delfin.alert_manager.trap_receiver.TrapReceiver' '._mib_builder', mock.Mock()) def test_start_success(self, mock_alert_source, mock_dispatcher): mock_alert_source.return_value = {} trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.trap_receiver_address = self.DEF_TRAP_RECV_ADDR trap_receiver_inst.trap_receiver_port = self.DEF_TRAP_RECV_PORT trap_receiver_inst.start() # Verify that snmp engine is initialised and transport config is set self.assertTrue(trap_receiver_inst.snmp_engine is not None) @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher' '.jobStarted') @mock.patch('delfin.db.api.alert_source_get_all') @mock.patch('delfin.alert_manager.trap_receiver.TrapReceiver' '._load_snmp_config', fakes.load_config_exception) def test_start_with_exception(self, mock_alert_source, mock_dispatcher): mock_alert_source.return_value = {} trap_receiver_inst = self._get_trap_receiver() # Mock load config to raise exception self.assertRaisesRegex(ValueError, "Failed to setup for trap listener", trap_receiver_inst.start) @mock.patch('pysnmp.carrier.asyncore.dgram.udp.UdpTransport' '.openServerMode', mock.Mock()) def test_add_transport_successful(self): trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() 
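# NOTE(editor): a minimal sketch of what _add_transport is expected to do,
# based on the usual pysnmp oneliner pattern (illustrative, not delfin's
# verbatim implementation):
#
#     config.addTransport(
#         snmp_engine, udp.domainName,
#         udp.UdpTransport().openServerMode(('127.0.0.1', 162)))
#
# which is why openServerMode is mocked out and config.getTransport is
# asserted on below.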
trap_receiver_inst.trap_receiver_address = self.DEF_TRAP_RECV_ADDR trap_receiver_inst.trap_receiver_port = self.DEF_TRAP_RECV_PORT trap_receiver_inst._add_transport() get_transport = config.getTransport(trap_receiver_inst.snmp_engine, udp.domainName) # Verify that snmp engine transport config is set after _add_transport self.assertTrue(get_transport is not None) def test_add_transport_exception(self): trap_receiver_inst = self._get_trap_receiver() exception_msg = r"int\(\) argument must be a string, " \ "a bytes-like object or a number, not 'NoneType'" # Mock exception by not initialising snmp engine self.assertRaisesRegex(exception.DelfinException, exception_msg, trap_receiver_inst._add_transport) @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher' '.jobStarted') @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher' '.closeDispatcher') @mock.patch('delfin.db.api.alert_source_get_all') @mock.patch('pysnmp.carrier.asyncore.dgram.udp.UdpTransport' '.openServerMode', mock.Mock()) @mock.patch('pysnmp.entity.config.addTransport', fakes.mock_add_transport) @mock.patch('delfin.alert_manager.trap_receiver.TrapReceiver' '._mib_builder', mock.Mock()) def test_stop_with_snmp_engine(self, mock_alert_source, mock_close_dispatcher, mock_dispatcher): mock_alert_source.return_value = {} trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.trap_receiver_address = self.DEF_TRAP_RECV_ADDR trap_receiver_inst.trap_receiver_port = self.DEF_TRAP_RECV_PORT trap_receiver_inst.start() trap_receiver_inst.stop() # Verify that close dispatcher is called during alert manager stop self.assertTrue(mock_close_dispatcher.called) @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher' '.closeDispatcher') def test_stop_without_snmp_engine(self, mock_close_dispatcher): trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.stop() # Verify that close dispatcher is not called when engine is not # initialised self.assertFalse(mock_close_dispatcher.called) @mock.patch('delfin.cryptor.decode', mock.Mock(return_value='public')) @mock.patch('delfin.alert_manager.snmp_validator.SNMPValidator.validate') @mock.patch('pysnmp.entity.config.addV1System') def test_sync_snmp_config_add_v2_version(self, mock_add_config, mock_validator): ctxt = {} alert_config = {'storage_id': 'abcd-1234-5678', 'version': 'snmpv2c', 'community_string': b'public'} trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() trap_receiver_inst.sync_snmp_config(ctxt, snmp_config_to_add=alert_config) # Verify that config is added to snmp engine # Storage_id is internally modified to remove '-' while adding mock_add_config.assert_called_once_with(trap_receiver_inst.snmp_engine, 'abcd12345678', alert_config[ 'community_string'], contextName=alert_config[ 'community_string']) mock_validator.assert_called_once_with(ctxt, alert_config) @mock.patch('pysnmp.entity.config.delV1System') def test_sync_snmp_config_del_v2_version(self, mock_del_config): ctxt = {} alert_config = {'storage_id': 'abcd-1234-5678', 'version': 'snmpv2c', 'community_string': 'public'} trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() trap_receiver_inst.sync_snmp_config(ctxt, snmp_config_to_del=alert_config) # Verify that config is deleted from snmp engine # Storage_id is internally modified to remove '-' while deleting mock_del_config.assert_called_once_with(trap_receiver_inst.snmp_engine, 'abcd12345678') def 
test_sync_snmp_config_add_invalid_version(self): ctxt = {} alert_source_config = {'storage_id': 'abcd-1234-5678', 'version': 'snmpv4', 'community_string': b'public'} trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() self.assertRaisesRegex(exception.InvalidSNMPConfig, "Invalid snmp " "version", trap_receiver_inst.sync_snmp_config, ctxt, snmp_config_to_add=alert_source_config) @mock.patch('pysnmp.entity.config.addV3User') def test_sync_snmp_config_add_v3_version(self, mock_add_config): ctxt = {} alert_config = fakes.fake_v3_alert_source() trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() trap_receiver_inst.sync_snmp_config(ctxt, snmp_config_to_add=alert_config) # Verify that addV3User to add config to engine self.assertTrue(mock_add_config.called) @mock.patch('pysnmp.entity.config.delV3User') def test_sync_snmp_config_del_v3_version(self, mock_del_config): ctxt = {} alert_config = fakes.fake_v3_alert_source() trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() trap_receiver_inst.sync_snmp_config(ctxt, snmp_config_to_add=alert_config) trap_receiver_inst.sync_snmp_config(ctxt, snmp_config_to_del=alert_config) # Verify that delV3User to del config from engine self.assertTrue(mock_del_config.called) @mock.patch('pysnmp.entity.config.delV3User', fakes.config_delv3_exception) @mock.patch('logging.LoggerAdapter.warning') def test_sync_snmp_config_del_exception(self, mock_log_warning): ctxt = {} alert_config = fakes.fake_v3_alert_source() trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() trap_receiver_inst.sync_snmp_config(ctxt, snmp_config_to_del=alert_config) self.assertTrue(mock_log_warning.called) def test_sync_snmp_config_invalid_auth_protocol(self): ctxt = {} alert_source_config = fakes.fake_v3_alert_source() alert_source_config['auth_protocol'] = 'invalid_auth' trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() self.assertRaisesRegex(exception.InvalidSNMPConfig, "Invalid " "auth_protocol", trap_receiver_inst.sync_snmp_config, ctxt, snmp_config_to_add=alert_source_config) def test_sync_snmp_config_invalid_priv_protocol(self): ctxt = {} alert_source_config = fakes.fake_v3_alert_source() alert_source_config['privacy_protocol'] = 'invalid_priv' trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() self.assertRaisesRegex(exception.InvalidSNMPConfig, "Invalid " "privacy_protocol", trap_receiver_inst.sync_snmp_config, ctxt, snmp_config_to_add=alert_source_config) @mock.patch('pysnmp.entity.config.addV3User') @mock.patch('delfin.db.api.alert_source_get_all') def test_load_snmp_config(self, mock_alert_source_list, mock_add_config): mock_alert_source_list.return_value = fakes.fake_v3_alert_source_list() trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() trap_receiver_inst._load_snmp_config() # Verify that config is added to engine self.assertTrue(mock_add_config.called) @mock.patch('delfin.db.alert_source_get_all') def test_get_alert_source_by_host_success(self, mock_alert_source_list): # alert_source_config = fakes.fake_v3_alert_source() expected_alert_source = {'storage_id': 'abcd-1234-5678', 'version': 'snmpv3', 'engine_id': '800000d30300000e112245', 'username': 'test1', 'auth_key': 'YWJjZDEyMzQ1Njc=', 'auth_protocol': 'HMACMD5', 'privacy_key': 'YWJjZDEyMzQ1Njc=', 
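# NOTE(editor): _get_alert_source_by_host is expected to select the entry
# whose 'host' field matches; with the single-entry fake list mocked below,
# the expected dict here is simply that entry echoed back.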
'privacy_protocol': 'DES' } mock_alert_source_list.return_value = fakes. \ fake_v3_alert_source_list_with_one() trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() alert_source = trap_receiver_inst. \ _get_alert_source_by_host('127.0.0.1') self.assertDictEqual(expected_alert_source, alert_source) @mock.patch('delfin.db.alert_source_get_all') def test_get_alert_source_by_host_without_storage(self, mock_alert_source_list): # alert_source_config = fakes.fake_v3_alert_source() mock_alert_source_list.return_value = fakes.null_alert_source_list() trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() self.assertRaisesRegex(exception.AlertSourceNotFoundWithHost, "", trap_receiver_inst._get_alert_source_by_host, '127.0.0.1') ================================================ FILE: delfin/tests/unit/api/__init__.py ================================================ ================================================ FILE: delfin/tests/unit/api/extensions/__init__.py ================================================ ================================================ FILE: delfin/tests/unit/api/extensions/foxinsocks.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from delfin.api import extensions class FoxInSocksController(object): def index(self, req): return "Try to say this Mr. Knox, sir..." class Foxinsocks(extensions.ExtensionDescriptor): """The Fox In Socks Extension.""" name = "Fox In Socks" alias = "FOXNSOX" namespace = "http://www.fox.in.socks/api/ext/pie/v1.0" updated = "2011-01-22T13:25:27-06:00" def __init__(self, ext_mgr): ext_mgr.register(self) def get_resources(self): resources = [] resource = extensions.ResourceExtension('foxnsocks', FoxInSocksController()) resources.append(resource) return resources def get_controller_extensions(self): return [] ================================================ FILE: delfin/tests/unit/api/fakes.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
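# NOTE(editor): the fakes below stand in for delfin.db calls in the API
# unit tests. A typical (hypothetical) pattern for wiring them up:
#
#     from unittest import mock
#     from delfin.tests.unit.api import fakes
#
#     @mock.patch('delfin.db.storage_get_all', fakes.fake_storages_get_all)
#     def test_list_storages(self):
#         req = fakes.HTTPRequest.blank('/storages')
#         ...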
import routes import webob.dec import webob.request from oslo_service import wsgi from delfin import context from delfin import exception from delfin.api.common import wsgi as os_wsgi from delfin.common import config, constants # noqa from delfin.common.constants import ResourceType, StorageMetric, \ StoragePoolMetric, VolumeMetric, ControllerMetric, PortMetric, \ DiskMetric, FileSystemMetric from delfin.db.sqlalchemy import models @webob.dec.wsgify def fake_wsgi(self, req): return self.application class TestRouter(wsgi.Router): def __init__(self, controller): mapper = routes.Mapper() mapper.resource("test", "tests", controller=os_wsgi.Resource(controller)) super(TestRouter, self).__init__(mapper) class HTTPRequest(os_wsgi.Request): @classmethod def blank(cls, *args, **kwargs): if not kwargs.get('base_url'): kwargs['base_url'] = 'http://localhost/v1' use_admin_context = kwargs.pop('use_admin_context', False) out = os_wsgi.Request.blank(*args, **kwargs) out.environ['delfin.context'] = context.RequestContext( is_admin=use_admin_context) return out def fake_storages_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): return [ { "id": "12c2d52f-01bc-41f5-b73f-7abf6f38a2a6", "created_at": "2020-06-09T08:59:48.710890", "free_capacity": 1045449, "updated_at": "2020-06-09T08:59:48.769470", "name": "fake_driver", "location": "HK", "firmware_version": "1.0.0", "vendor": "fake_vendor", "status": "normal", "sync_status": constants.SyncStatus.SYNCED, "model": "fake_model", "description": "it is a fake driver.", "serial_number": "2102453JPN12KA0000113", "used_capacity": 3126, "total_capacity": 1048576, 'raw_capacity': 1610612736000, 'subscribed_capacity': 219902325555200 }, { "id": "277a1d8f-a36e-423e-bdd9-db154f32c289", "created_at": "2020-06-09T08:58:23.008821", "free_capacity": 1045449, "updated_at": "2020-06-09T08:58:23.033601", "name": "fake_driver", "location": "HK", "firmware_version": "1.0.0", "vendor": "fake_vendor", "status": "normal", "sync_status": constants.SyncStatus.SYNCED, "model": "fake_model", "description": "it is a fake driver.", "serial_number": "2102453JPN12KA0000112", "used_capacity": 3126, "total_capacity": 1048576, 'raw_capacity': 1610612736000, 'subscribed_capacity': 219902325555200 } ] def fake_storages_get_all_with_filter( context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): return [ { "id": "12c2d52f-01bc-41f5-b73f-7abf6f38a2a6", "created_at": "2020-06-09T08:59:48.710890", "free_capacity": 1045449, "updated_at": "2020-06-09T08:59:48.769470", "name": "fake_driver", "location": "HK", "firmware_version": "1.0.0", "vendor": "fake_vendor", "status": "normal", "sync_status": constants.SyncStatus.SYNCED, "model": "fake_model", "description": "it is a fake driver.", "serial_number": "2102453JPN12KA0000113", "used_capacity": 3126, "total_capacity": 1048576, 'raw_capacity': 1610612736000, 'subscribed_capacity': 219902325555200 } ] def fake_storages_show(context, storage_id): return { "id": "12c2d52f-01bc-41f5-b73f-7abf6f38a2a6", "created_at": "2020-06-09T08:59:48.710890", "free_capacity": 1045449, "updated_at": "2020-06-09T08:59:48.769470", "name": "fake_driver", "location": "HK", "firmware_version": "1.0.0", "vendor": "fake_vendor", "status": "normal", "sync_status": constants.SyncStatus.SYNCED, "model": "fake_model", "description": "it is a fake driver.", "serial_number": "2102453JPN12KA0000113", "used_capacity": 3126, "total_capacity": 1048576, 'raw_capacity': 1610612736000, 
'subscribed_capacity': 219902325555200 } def fake_access_info_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): return [ { 'created_at': "2020-06-09T08:59:48.710890", 'storage_id': '5f5c806d-2e65-473c-b612-345ef43f0642', 'model': 'fake_driver', 'vendor': 'fake_storage', 'rest': { 'host': '10.0.0.76', 'port': 1234, 'username': 'admin', 'password': b'YWJjZA==' }, 'extra_attributes': {'array_id': '0001234567891'}, 'updated_at': None } ] def fake_sync(self, req, id): pass def fake_v3_alert_source_config(): return {'host': '127.0.0.1', 'version': 'snmpv3', 'security_level': 'authPriv', 'engine_id': '800000d30300000e112245', 'username': 'test1', 'auth_key': 'abcd123456', 'auth_protocol': 'HMACMD5', 'privacy_key': 'abcd123456', 'privacy_protocol': 'DES', 'context_name': 'NA', 'retry_num': 2, 'expiration': 2, 'port': 161 } def fake_v2_alert_source_config(): return {'host': '127.0.0.1', 'version': 'snmpv2c', 'community_string': 'public', 'context_name': 'NA', 'retry_num': 2, 'expiration': 2, 'port': 161 } def fake_v3_alert_source(): alert_source = models.AlertSource() alert_source.host = '127.0.0.1' alert_source.storage_id = 'abcd-1234-5678' alert_source.version = 'snmpv3' alert_source.engine_id = '800000d30300000e112245' alert_source.username = 'test1' alert_source.auth_key = 'YWJjZDEyMzQ1Njc=' alert_source.auth_protocol = 'HMACMD5' alert_source.privacy_key = 'YWJjZDEyMzQ1Njc=' alert_source.privacy_protocol = 'DES' alert_source.port = 161 alert_source.context_name = "" alert_source.retry_num = 1 alert_source.expiration = 1 alert_source.created_at = '2020-06-15T09:50:31.698956' alert_source.updated_at = '2020-06-15T09:50:31.698956' return alert_source def fake_all_snmp_configs(): alert_source = models.AlertSource() alert_source.host = '127.0.0.1' alert_source.storage_id = 'abcd-1234-5678' alert_source.version = 'snmpv3' alert_source.engine_id = '800000d30300000e112245' alert_source.username = 'test1' alert_source.auth_key = 'YWJjZDEyMzQ1Njc=' alert_source.auth_protocol = 'HMACMD5' alert_source.privacy_key = 'YWJjZDEyMzQ1Njc=' alert_source.privacy_protocol = 'DES' alert_source.port = 161 alert_source.context_name = "" alert_source.retry_num = 1 alert_source.expiration = 1 alert_source.created_at = '2020-06-15T09:50:31.698956' alert_source.updated_at = '2020-06-15T09:50:31.698956' return [alert_source] def fake_v3_alert_source_noauth_nopriv(): alert_source = models.AlertSource() alert_source.host = '127.0.0.1' alert_source.storage_id = 'abcd-1234-5678' alert_source.version = 'snmpv3' alert_source.security_level = 'noAuthnoPriv' alert_source.engine_id = '800000d30300000e112245' alert_source.username = 'test1' alert_source.port = 161 alert_source.context_name = "" alert_source.retry_num = 1 alert_source.expiration = 1 alert_source.created_at = '2020-06-15T09:50:31.698956' alert_source.updated_at = '2020-06-15T09:50:31.698956' return alert_source def fake_v3_alert_source_auth_nopriv(): alert_source = models.AlertSource() alert_source.host = '127.0.0.1' alert_source.storage_id = 'abcd-1234-5678' alert_source.version = 'snmpv3' alert_source.security_level = 'authNoPriv' alert_source.auth_protocol = 'HMACMD5' alert_source.engine_id = '800000d30300000e112245' alert_source.username = 'test1' alert_source.port = 161 alert_source.context_name = "" alert_source.retry_num = 1 alert_source.expiration = 1 alert_source.created_at = '2020-06-15T09:50:31.698956' alert_source.updated_at = '2020-06-15T09:50:31.698956' return alert_source def fake_v2_alert_source(): 
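# NOTE(editor): unlike the SNMPv3 fakes above, a v2c alert source carries
# only a community string; there are no user/auth/privacy credentials.
# The v3 keys above are base64-encoded text, e.g. (illustrative):
#
#     import base64
#     base64.b64decode('YWJjZDEyMzQ1Njc=')  # -> b'abcd1234567'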
alert_source = models.AlertSource() alert_source.host = '127.0.0.1' alert_source.storage_id = 'abcd-1234-5678' alert_source.version = 'snmpv2c' alert_source.community_string = 'public' alert_source.port = 161 alert_source.context_name = "" alert_source.retry_num = 1 alert_source.expiration = 1 alert_source.created_at = '2020-06-15T09:50:31.698956' alert_source.updated_at = '2020-06-15T09:50:31.698956' return alert_source def alert_source_get_exception(ctx, storage_id): raise exception.AlertSourceNotFound('abcd-1234-5678') def fake_access_info_show(context, storage_id): access_info = models.AccessInfo() access_info.updated_at = '2020-06-15T09:50:31.698956' access_info.storage_id = '865ffd4d-f1f7-47de-abc3-5541ef44d0c1' access_info.created_at = '2020-06-15T09:50:31.698956' access_info.vendor = 'fake_storage' access_info.model = 'fake_driver' access_info.rest = { 'host': '10.0.0.0', 'username': 'admin', 'password': 'YWJjZA==', 'port': 1234 } access_info.extra_attributes = {'array_id': '0001234567897'} return access_info def fake_access_infos_show_all(context): access_info = models.AccessInfo() access_info.updated_at = '2020-06-15T09:50:31.698956' access_info.storage_id = '865ffd4d-f1f7-47de-abc3-5541ef44d0c1' access_info.created_at = '2020-06-15T09:50:31.698956' access_info.vendor = 'fake_storage' access_info.model = 'fake_driver' access_info.rest = { 'host': '10.0.0.0', 'username': 'admin', 'password': 'YWJjZA==', 'port': 1234 } access_info.extra_attributes = {'array_id': '0001234567897'} return [access_info] def fake_update_access_info(self, context): access_info = models.AccessInfo() access_info.updated_at = '2020-06-15T09:50:31.698956' access_info.storage_id = '865ffd4d-f1f7-47de-abc3-5541ef44d0c1' access_info.created_at = '2020-06-15T09:50:31.698956' access_info.vendor = 'fake_storage' access_info.model = 'fake_driver' access_info.rest = { 'host': '10.0.0.0', 'username': 'admin_modified', 'password': 'YWJjZA==', 'port': 1234 } access_info.extra_attributes = {'array_id': '0001234567897'} return access_info def fake_volume_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): return [ { "created_at": "2020-06-10T07:17:31.157079", "updated_at": "2020-06-10T07:17:31.157079", "id": "d7fe425b-fddc-4ba4-accb-4343c142dc47", "name": "004DF", "storage_id": "5f5c806d-2e65-473c-b612-345ef43f0642", "native_storage_pool_id": "SRP_1", "description": "fake_storage 'thin device' volume", "status": "available", "native_volume_id": "004DF", "wwn": "60000970000297801855533030344446", "type": 'thin', "total_capacity": 1075838976, "used_capacity": 0, "free_capacity": 1075838976, "compressed": True, "deduplicated": False }, { "created_at": "2020-06-10T07:17:31.157079", "updated_at": "2020-06-10T07:17:31.157079", "id": "dad84a1f-db8d-49ab-af40-048fc3544c12", "name": "004E0", "storage_id": "5f5c806d-2e65-473c-b612-345ef43f0642", "native_storage_pool_id": "SRP_1", "description": "fake_storage 'thin device' volume", "status": "available", "native_volume_id": "004E0", "wwn": "60000970000297801855533030344530", "type": 'thin', "total_capacity": 1075838976, "used_capacity": 0, "free_capacity": 1075838976, "compressed": True, "deduplicated": False } ] def fake_volume_show(context, volume_id): return { "created_at": "2020-06-10T07:17:31.157079", "updated_at": "2020-06-10T07:17:31.157079", "id": "d7fe425b-fddc-4ba4-accb-4343c142dc47", "name": "004DF", "storage_id": "5f5c806d-2e65-473c-b612-345ef43f0642", "native_storage_pool_id": "SRP_1", "description": "fake_storage 'thin 
device' volume", "status": "available", "native_volume_id": "004DF", "wwn": "60000970000297801855533030344446", "type": 'thin', "total_capacity": 1075838976, "used_capacity": 0, "free_capacity": 1075838976, "compressed": True, "deduplicated": False } def fake_storage_pool_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): return [ { "created_at": "2020-06-10T07:17:08.707356", "updated_at": "2020-06-10T07:17:08.707356", "id": "14155a1f-f053-4ccb-a846-ed67e4387428", "name": "SRP_1", "storage_id": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6', "native_storage_pool_id": "SRP_1", "description": "fake storage Pool", "status": "normal", "storage_type": "block", "total_capacity": 26300318136401, "used_capacity": 19054536509358, "free_capacity": 7245781627043, 'subscribed_capacity': 219902325555200 } ] def fake_storage_pool_show(context, storage_pool_id): return { "created_at": "2020-06-10T07:17:08.707356", "updated_at": "2020-06-10T07:17:08.707356", "id": "14155a1f-f053-4ccb-a846-ed67e4387428", "name": "SRP_1", "storage_id": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6', "native_storage_pool_id": "SRP_1", "description": "fake storage Pool", "status": "normal", "storage_type": "block", "total_capacity": 26300318136401, "used_capacity": 19054536509358, "free_capacity": 7245781627043, 'subscribed_capacity': 219902325555200 } def fake_storage_get_exception(ctx, storage_id): raise exception.StorageNotFound(storage_id) def fake_getcmd_exception(auth_data, transport_target, *var_names, **kwargs): return "Connection failed", None, None, None def fake_getcmd_success(auth_data, transport_target, *var_names, **kwargs): return None, None, None, None def fake_get_capabilities(context, storage_id): return {'is_historic': False, 'resource_metrics': { ResourceType.STORAGE: { StorageMetric.THROUGHPUT.name: { "unit": StorageMetric.THROUGHPUT.unit, "description": StorageMetric.THROUGHPUT.description }, StorageMetric.RESPONSE_TIME.name: { "unit": StorageMetric.RESPONSE_TIME.unit, "description": StorageMetric.RESPONSE_TIME.description }, StorageMetric.READ_RESPONSE_TIME.name: { "unit": StorageMetric.READ_RESPONSE_TIME.unit, "description": StorageMetric.READ_RESPONSE_TIME.description }, StorageMetric.WRITE_RESPONSE_TIME.name: { "unit": StorageMetric.WRITE_RESPONSE_TIME.unit, "description": StorageMetric.WRITE_RESPONSE_TIME.description }, StorageMetric.IOPS.name: { "unit": StorageMetric.IOPS.unit, "description": StorageMetric.IOPS.description }, StorageMetric.READ_THROUGHPUT.name: { "unit": StorageMetric.READ_THROUGHPUT.unit, "description": StorageMetric.READ_THROUGHPUT.description }, StorageMetric.WRITE_THROUGHPUT.name: { "unit": StorageMetric.WRITE_THROUGHPUT.unit, "description": StorageMetric.WRITE_THROUGHPUT.description }, StorageMetric.READ_IOPS.name: { "unit": StorageMetric.READ_IOPS.unit, "description": StorageMetric.READ_IOPS.description }, StorageMetric.WRITE_IOPS.name: { "unit": StorageMetric.WRITE_IOPS.unit, "description": StorageMetric.WRITE_IOPS.description }, }, ResourceType.STORAGE_POOL: { StoragePoolMetric.THROUGHPUT.name: { "unit": StoragePoolMetric.THROUGHPUT.unit, "description": StoragePoolMetric.THROUGHPUT.description }, StoragePoolMetric.RESPONSE_TIME.name: { "unit": StoragePoolMetric.RESPONSE_TIME.unit, "description": StoragePoolMetric.RESPONSE_TIME.description }, StoragePoolMetric.IOPS.name: { "unit": StoragePoolMetric.IOPS.unit, "description": StoragePoolMetric.IOPS.description }, StoragePoolMetric.READ_THROUGHPUT.name: { "unit": 
StoragePoolMetric.READ_THROUGHPUT.unit, "description": StoragePoolMetric.READ_THROUGHPUT.description }, StoragePoolMetric.WRITE_THROUGHPUT.name: { "unit": StoragePoolMetric.WRITE_THROUGHPUT.unit, "description": StoragePoolMetric.WRITE_THROUGHPUT.description }, StoragePoolMetric.READ_IOPS.name: { "unit": StoragePoolMetric.READ_IOPS.unit, "description": StoragePoolMetric.READ_IOPS.description }, StoragePoolMetric.WRITE_IOPS.name: { "unit": StoragePoolMetric.WRITE_IOPS.unit, "description": StoragePoolMetric.WRITE_IOPS.description }, }, ResourceType.VOLUME: { VolumeMetric.THROUGHPUT.name: { "unit": VolumeMetric.THROUGHPUT.unit, "description": VolumeMetric.THROUGHPUT.description }, VolumeMetric.RESPONSE_TIME.name: { "unit": VolumeMetric.RESPONSE_TIME.unit, "description": VolumeMetric.RESPONSE_TIME.description }, VolumeMetric.READ_RESPONSE_TIME.name: { "unit": VolumeMetric.READ_RESPONSE_TIME.unit, "description": VolumeMetric.READ_RESPONSE_TIME.description }, VolumeMetric.WRITE_RESPONSE_TIME.name: { "unit": VolumeMetric.WRITE_RESPONSE_TIME.unit, "description": VolumeMetric.WRITE_RESPONSE_TIME.description }, VolumeMetric.IOPS.name: { "unit": VolumeMetric.IOPS.unit, "description": VolumeMetric.IOPS.description }, VolumeMetric.READ_THROUGHPUT.name: { "unit": VolumeMetric.READ_THROUGHPUT.unit, "description": VolumeMetric.READ_THROUGHPUT.description }, VolumeMetric.WRITE_THROUGHPUT.name: { "unit": VolumeMetric.WRITE_THROUGHPUT.unit, "description": VolumeMetric.WRITE_THROUGHPUT.description }, VolumeMetric.READ_IOPS.name: { "unit": VolumeMetric.READ_IOPS.unit, "description": VolumeMetric.READ_IOPS.description }, VolumeMetric.WRITE_IOPS.name: { "unit": VolumeMetric.WRITE_IOPS.unit, "description": VolumeMetric.WRITE_IOPS.description }, VolumeMetric.CACHE_HIT_RATIO.name: { "unit": VolumeMetric.CACHE_HIT_RATIO.unit, "description": VolumeMetric.CACHE_HIT_RATIO.description }, VolumeMetric.READ_CACHE_HIT_RATIO.name: { "unit": VolumeMetric.READ_CACHE_HIT_RATIO.unit, "description": VolumeMetric.READ_CACHE_HIT_RATIO.description }, VolumeMetric.WRITE_CACHE_HIT_RATIO.name: { "unit": VolumeMetric.WRITE_CACHE_HIT_RATIO.unit, "description": VolumeMetric.WRITE_CACHE_HIT_RATIO.description }, VolumeMetric.IO_SIZE.name: { "unit": VolumeMetric.IO_SIZE.unit, "description": VolumeMetric.IO_SIZE.description }, VolumeMetric.READ_IO_SIZE.name: { "unit": VolumeMetric.READ_IO_SIZE.unit, "description": VolumeMetric.READ_IO_SIZE.description }, VolumeMetric.WRITE_IO_SIZE.name: { "unit": VolumeMetric.WRITE_IO_SIZE.unit, "description": VolumeMetric.WRITE_IO_SIZE.description }, }, ResourceType.CONTROLLER: { ControllerMetric.THROUGHPUT.name: { "unit": ControllerMetric.THROUGHPUT.unit, "description": ControllerMetric.THROUGHPUT.description }, ControllerMetric.RESPONSE_TIME.name: { "unit": ControllerMetric.RESPONSE_TIME.unit, "description": ControllerMetric.RESPONSE_TIME.description }, ControllerMetric.IOPS.name: { "unit": ControllerMetric.IOPS.unit, "description": ControllerMetric.IOPS.description }, ControllerMetric.READ_THROUGHPUT.name: { "unit": ControllerMetric.READ_THROUGHPUT.unit, "description": ControllerMetric.READ_THROUGHPUT.description }, ControllerMetric.WRITE_THROUGHPUT.name: { "unit": ControllerMetric.WRITE_THROUGHPUT.unit, "description": ControllerMetric.WRITE_THROUGHPUT.description }, ControllerMetric.READ_IOPS.name: { "unit": ControllerMetric.READ_IOPS.unit, "description": ControllerMetric.READ_IOPS.description }, ControllerMetric.WRITE_IOPS.name: { "unit": ControllerMetric.WRITE_IOPS.unit, "description": 
ControllerMetric.WRITE_IOPS.description }, ControllerMetric.CPU_USAGE.name: { "unit": ControllerMetric.CPU_USAGE.unit, "description": ControllerMetric.CPU_USAGE.description } }, ResourceType.PORT: { PortMetric.THROUGHPUT.name: { "unit": PortMetric.THROUGHPUT.unit, "description": PortMetric.THROUGHPUT.description }, PortMetric.RESPONSE_TIME.name: { "unit": PortMetric.RESPONSE_TIME.unit, "description": PortMetric.RESPONSE_TIME.description }, PortMetric.IOPS.name: { "unit": PortMetric.IOPS.unit, "description": PortMetric.IOPS.description }, PortMetric.READ_THROUGHPUT.name: { "unit": PortMetric.READ_THROUGHPUT.unit, "description": PortMetric.READ_THROUGHPUT.description }, PortMetric.WRITE_THROUGHPUT.name: { "unit": PortMetric.WRITE_THROUGHPUT.unit, "description": PortMetric.WRITE_THROUGHPUT.description }, PortMetric.READ_IOPS.name: { "unit": PortMetric.READ_IOPS.unit, "description": PortMetric.READ_IOPS.description }, PortMetric.WRITE_IOPS.name: { "unit": PortMetric.WRITE_IOPS.unit, "description": PortMetric.WRITE_IOPS.description }, }, ResourceType.DISK: { DiskMetric.THROUGHPUT.name: { "unit": DiskMetric.THROUGHPUT.unit, "description": DiskMetric.THROUGHPUT.description }, DiskMetric.RESPONSE_TIME.name: { "unit": DiskMetric.RESPONSE_TIME.unit, "description": DiskMetric.RESPONSE_TIME.description }, DiskMetric.IOPS.name: { "unit": DiskMetric.IOPS.unit, "description": DiskMetric.IOPS.description }, DiskMetric.READ_THROUGHPUT.name: { "unit": DiskMetric.READ_THROUGHPUT.unit, "description": DiskMetric.READ_THROUGHPUT.description }, DiskMetric.WRITE_THROUGHPUT.name: { "unit": DiskMetric.WRITE_THROUGHPUT.unit, "description": DiskMetric.WRITE_THROUGHPUT.description }, DiskMetric.READ_IOPS.name: { "unit": DiskMetric.READ_IOPS.unit, "description": DiskMetric.READ_IOPS.description }, DiskMetric.WRITE_IOPS.name: { "unit": DiskMetric.WRITE_IOPS.unit, "description": DiskMetric.WRITE_IOPS.description }, }, ResourceType.FILESYSTEM: { FileSystemMetric.THROUGHPUT.name: { "unit": FileSystemMetric.THROUGHPUT.unit, "description": FileSystemMetric.THROUGHPUT.description }, FileSystemMetric.READ_RESPONSE_TIME.name: { "unit": FileSystemMetric.READ_RESPONSE_TIME.unit, "description": FileSystemMetric.READ_RESPONSE_TIME.description }, FileSystemMetric.WRITE_RESPONSE_TIME.name: { "unit": FileSystemMetric.WRITE_RESPONSE_TIME.unit, "description": FileSystemMetric.WRITE_RESPONSE_TIME.description }, FileSystemMetric.IOPS.name: { "unit": FileSystemMetric.IOPS.unit, "description": FileSystemMetric.IOPS.description }, FileSystemMetric.READ_THROUGHPUT.name: { "unit": FileSystemMetric.READ_THROUGHPUT.unit, "description": FileSystemMetric.READ_THROUGHPUT.description }, FileSystemMetric.WRITE_THROUGHPUT.name: { "unit": FileSystemMetric.WRITE_THROUGHPUT.unit, "description": FileSystemMetric.WRITE_THROUGHPUT.description }, FileSystemMetric.READ_IOPS.name: { "unit": FileSystemMetric.READ_IOPS.unit, "description": FileSystemMetric.READ_IOPS.description }, FileSystemMetric.WRITE_IOPS.name: { "unit": FileSystemMetric.WRITE_IOPS.unit, "description": FileSystemMetric.WRITE_IOPS.description }, FileSystemMetric.IO_SIZE.name: { "unit": FileSystemMetric.IO_SIZE.unit, "description": FileSystemMetric.IO_SIZE.description }, FileSystemMetric.READ_IO_SIZE.name: { "unit": FileSystemMetric.READ_IO_SIZE.unit, "description": FileSystemMetric.READ_IO_SIZE.description }, FileSystemMetric.WRITE_IO_SIZE.name: { "unit": FileSystemMetric.WRITE_IO_SIZE.unit, "description": FileSystemMetric.WRITE_IO_SIZE.description }, }, } } def 
custom_fake_get_capabilities(capabilities):
    def get_capability(context, storage_id):
        return capabilities
    return get_capability


================================================ FILE: delfin/tests/unit/api/test_api_validation.py ================================================
# Copyright 2020 The SODA Authors.
# Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re
import sys

import fixtures
import six
from six.moves import http_client as http

from delfin.api import validation
from delfin.api.validation import parameter_types
from delfin import exception
from delfin import test


class FakeRequest(object):
    environ = {}


class ValidationRegex(test.TestCase):

    def test_build_regex_range(self):

        def _get_all_chars():
            for i in range(0x7F):
                yield six.unichr(i)

        self.useFixture(fixtures.MonkeyPatch(
            'delfin.api.validation.parameter_types._get_all_chars',
            _get_all_chars))

        r = parameter_types._build_regex_range(ws=False)
        self.assertEqual(re.escape('!') + '-' + re.escape('~'), r)

        # if we allow whitespace the range starts earlier
        r = parameter_types._build_regex_range(ws=True)
        self.assertEqual(re.escape(' ') + '-' + re.escape('~'), r)

        # excluding a character will give us 2 ranges
        r = parameter_types._build_regex_range(ws=True, exclude=['A'])
        self.assertEqual(re.escape(' ') + '-' + re.escape('@') +
                         'B' + '-' + re.escape('~'), r)

        # inverting which gives us all the initial unprintable characters.
        r = parameter_types._build_regex_range(ws=False, invert=True)
        self.assertEqual(re.escape('\x00') + '-' + re.escape(' '), r)

        # excluding characters that create a singleton. Naively this would be:
        # ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural.
        r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C'])
        self.assertEqual(re.escape(' ') + '-' + re.escape('@') +
                         'B' + 'D' + '-' + re.escape('~'), r)

        # ws=True means the positive regex has printable whitespaces,
        # so the inverse will not. The inverse will include things we
        # exclude.
        r = parameter_types._build_regex_range(
            ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True)
        self.assertEqual(re.escape('\x00') + '-' +
                         re.escape('\x1f') + 'A-CZ', r)


class APIValidationTestCase(test.TestCase):

    def setUp(self, schema=None):
        super(APIValidationTestCase, self).setUp()
        self.post = None

        if schema is not None:
            @validation.schema(request_body_schema=schema)
            def post(req, body):
                return 'Validation succeeded.'

            self.post = post

    def check_validation_error(self, method, body, expected_detail, req=None):
        if not req:
            req = FakeRequest()
        try:
            method(body=body, req=req,)
        except exception.InvalidInput as ex:
            self.assertEqual(http.BAD_REQUEST, ex.code)
            if isinstance(expected_detail, list):
                self.assertEqual(expected_detail, ex.error_args,
                                 'Exception details did not match expected')
            else:
                self.assertEqual(expected_detail, ex.error_args[0],
                                 'Exception details did not match expected')
        except Exception as ex:
            self.fail('An unexpected exception happens: %s' % ex)
        else:
            self.fail('Any exception did not happen.')


class RequiredDisableTestCase(APIValidationTestCase):

    def setUp(self):
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'integer',
                },
            },
        }
        super(RequiredDisableTestCase, self).setUp(schema=schema)

    def test_validate_required_disable(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 1}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'abc': 1}, req=FakeRequest()))


class RequiredEnableTestCase(APIValidationTestCase):

    def setUp(self):
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'integer',
                },
            },
            'required': ['foo']
        }
        super(RequiredEnableTestCase, self).setUp(schema=schema)

    def test_validate_required_enable(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 1}, req=FakeRequest()))

    def test_validate_required_enable_fails(self):
        detail = "'foo' is a required property"
        self.check_validation_error(self.post, body={'abc': 1},
                                    expected_detail=detail)


class AdditionalPropertiesEnableTestCase(APIValidationTestCase):

    def setUp(self):
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'integer',
                },
            },
            'required': ['foo'],
        }
        super(AdditionalPropertiesEnableTestCase, self).setUp(schema=schema)

    def test_validate_additionalProperties_enable(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 1}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 1, 'ext': 1},
                                   req=FakeRequest()))


class AdditionalPropertiesDisableTestCase(APIValidationTestCase):

    def setUp(self):
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'integer',
                },
            },
            'required': ['foo'],
            'additionalProperties': False,
        }
        super(AdditionalPropertiesDisableTestCase, self).setUp(schema=schema)

    def test_validate_additionalProperties_disable(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 1}, req=FakeRequest()))

    def test_validate_additionalProperties_disable_fails(self):
        detail = "Additional properties are not allowed ('ext' was unexpected)"
        self.check_validation_error(self.post, body={'foo': 1, 'ext': 1},
                                    expected_detail=detail)


class PatternPropertiesTestCase(APIValidationTestCase):

    def setUp(self):
        schema = {
            'patternProperties': {
                '^[a-zA-Z0-9]{1,10}$': {
                    'type': 'string'
                },
            },
            'additionalProperties': False,
        }
        super(PatternPropertiesTestCase, self).setUp(schema=schema)

    def test_validate_patternProperties(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'bar'}, req=FakeRequest()))

    def test_validate_patternProperties_fails(self):
        details = "'__' does not match any of the regexes: " \
                  "'^[a-zA-Z0-9]{1,10}$'"
        self.check_validation_error(self.post, body={'__': 'bar'},
                                    expected_detail=details)

        details = "'' does not match any of the regexes: " \
                  "'^[a-zA-Z0-9]{1,10}$'"
        self.check_validation_error(self.post, body={'': 'bar'},
                                    expected_detail=details)

        details = "'0123456789a' does not match any of the regexes: " \
                  "'^[a-zA-Z0-9]{1,10}$'"
        self.check_validation_error(self.post, body={'0123456789a': 'bar'},
                                    expected_detail=details)

        ver_info = sys.version_info
        if ver_info.major == 3 and ver_info.minor >= 5:
            detail = "expected string or bytes-like object"
        else:
            detail = "expected string or buffer"
        self.check_validation_error(self.post, body={None: 'bar'},
                                    expected_detail=detail)


class StringTestCase(APIValidationTestCase):

    def setUp(self):
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'string',
                },
            },
        }
        super(StringTestCase, self).setUp(schema=schema)

    def test_validate_string(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'abc'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '0'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': ''}, req=FakeRequest()))

    def test_validate_string_fails(self):
        detail = ("Invalid input for field/attribute foo. "
                  "1 is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': 1},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. "
                  "1.5 is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': 1.5},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. "
                  "True is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': True},
                                    expected_detail=detail)


class StringLengthTestCase(APIValidationTestCase):

    def setUp(self):
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'string',
                    'minLength': 1,
                    'maxLength': 10,
                },
            },
        }
        super(StringLengthTestCase, self).setUp(schema=schema)

    def test_validate_string_length(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '0'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '0123456789'},
                                   req=FakeRequest()))

    def test_validate_string_length_fails(self):
        detail = ("Invalid input for field/attribute foo."
                  " '' is too short")
        self.check_validation_error(self.post, body={'foo': ''},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo."
                  " '0123456789a' is too long")
        self.check_validation_error(self.post, body={'foo': '0123456789a'},
                                    expected_detail=detail)


class IntegerTestCase(APIValidationTestCase):

    def setUp(self):
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': ['integer', 'string'],
                    'pattern': '^[0-9]+$',
                },
            },
        }
        super(IntegerTestCase, self).setUp(schema=schema)

    def test_validate_integer(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 1}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '1'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '0123456789'},
                                   req=FakeRequest()))

    def test_validate_integer_fails(self):
        detail = ("Invalid input for field/attribute foo. "
                  "'abc' does not match '^[0-9]+$'")
        self.check_validation_error(self.post, body={'foo': 'abc'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. "
                  "True is not of type 'integer', 'string'")
        self.check_validation_error(self.post, body={'foo': True},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. "
                  "'0xffff' does not match '^[0-9]+$'")
        self.check_validation_error(self.post, body={'foo': '0xffff'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. "
                  "1.0 is not of type 'integer', 'string'")
        self.check_validation_error(self.post, body={'foo': 1.0},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. "
                  "'1.0' does not match '^[0-9]+$'")
        self.check_validation_error(self.post, body={'foo': '1.0'},
                                    expected_detail=detail)


class IntegerRangeTestCase(APIValidationTestCase):

    def setUp(self):
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': ['integer', 'string'],
                    'pattern': '^[0-9]+$',
                    'minimum': 1,
                    'maximum': 10,
                },
            },
        }
        super(IntegerRangeTestCase, self).setUp(schema=schema)

    def test_validate_integer_range(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 1}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 10}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '1'}, req=FakeRequest()))

    def test_validate_integer_range_fails(self):
        detail = ("Invalid input for field/attribute foo. "
                  "0 is less than the minimum of 1")
        self.check_validation_error(self.post, body={'foo': 0},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. "
                  "11 is greater than the maximum of 10")
        self.check_validation_error(self.post, body={'foo': 11},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. "
                  "0 is less than the minimum of 1")
        self.check_validation_error(self.post, body={'foo': '0'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. "
                  "11 is greater than the maximum of 10")
        self.check_validation_error(self.post, body={'foo': '11'},
                                    expected_detail=detail)


class NameTestCase(APIValidationTestCase):

    def setUp(self):
        schema = {
            'type': 'object',
            'properties': {
                'foo': parameter_types.name,
            },
        }
        super(NameTestCase, self).setUp(schema=schema)

    def test_validate_name(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'volume.1'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'volume 1'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'a'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': u'\u0434'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': u'\u0434\u2006\ufffd'},
                                   req=FakeRequest()))


class DatetimeTestCase(APIValidationTestCase):

    def setUp(self):
        schema = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'string',
                    'format': 'date-time',
                },
            },
        }
        super(DatetimeTestCase, self).setUp(schema=schema)

    def test_validate_datetime(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={
                             'foo': '2017-01-14T01:00:00Z'}, req=FakeRequest()
                         ))


================================================ FILE: delfin/tests/unit/api/test_extensions.py ================================================
# Copyright 2020 The SODA Authors.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import iso8601 from oslo_config import cfg from oslo_serialization import jsonutils import webob from delfin import test from delfin.api import extensions from delfin.api.v1 import router CONF = cfg.CONF class ExtensionTestCase(test.TestCase): def setUp(self): super(ExtensionTestCase, self).setUp() ext_list = CONF.delfin_api_extension[:] fox = ('delfin.tests.unit.api.extensions.foxinsocks.Foxinsocks') if fox not in ext_list: ext_list.append(fox) self.flags(delfin_api_extension=ext_list) class ExtensionControllerTest(ExtensionTestCase): def setUp(self): super(ExtensionControllerTest, self).setUp() self.ext_list = [] self.ext_list.sort() def test_list_extensions_json(self): app = router.APIRouter() request = webob.Request.blank("/extensions") response = request.get_response(app) self.assertEqual(200, response.status_int) # Make sure we have all the extensions, extra extensions being OK. data = jsonutils.loads(response.body) names = [str(x['name']) for x in data['extensions'] if str(x['name']) in self.ext_list] names.sort() self.assertEqual(self.ext_list, names) # Ensure all the timestamps are valid according to iso8601 for ext in data['extensions']: iso8601.parse_date(ext['updated']) # Make sure that at least Fox in Sox is correct. (fox_ext, ) = [ x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] self.assertEqual( {'name': 'Fox In Socks', 'updated': '2011-01-22T13:25:27-06:00', 'description': 'The Fox In Socks Extension.', 'alias': 'FOXNSOX', 'links': []}, fox_ext) for ext in data['extensions']: url = '/extensions/%s' % ext['alias'] request = webob.Request.blank(url) response = request.get_response(app) output = jsonutils.loads(response.body) self.assertEqual(ext['alias'], output['extension']['alias']) def test_get_extension_json(self): app = router.APIRouter() request = webob.Request.blank("/extensions/FOXNSOX") response = request.get_response(app) self.assertEqual(200, response.status_int) data = jsonutils.loads(response.body) self.assertEqual( {"name": "Fox In Socks", "updated": "2011-01-22T13:25:27-06:00", "description": "The Fox In Socks Extension.", "alias": "FOXNSOX", "links": []}, data['extension']) def test_get_non_existing_extension_json(self): app = router.APIRouter() request = webob.Request.blank("/extensions/4") response = request.get_response(app) self.assertEqual(404, response.status_int) class StubExtensionManager(object): """Provides access to Tweedle Beetles.""" name = "Tweedle Beetle Extension" alias = "TWDLBETL" def __init__(self, resource_ext=None, action_ext=None, request_ext=None, controller_ext=None): self.resource_ext = resource_ext self.controller_ext = controller_ext self.extra_resource_ext = None def get_resources(self): resource_exts = [] if self.resource_ext: resource_exts.append(self.resource_ext) if self.extra_resource_ext: resource_exts.append(self.extra_resource_ext) return resource_exts def get_controller_extensions(self): controller_extensions = [] if self.controller_ext: controller_extensions.append(self.controller_ext) return controller_extensions class ExtensionControllerIdFormatTest(test.TestCase): def _bounce_id(self, test_id): class BounceController(object): def show(self, req, id): return id res_ext = extensions.ResourceExtension('bounce', BounceController()) manager = StubExtensionManager(res_ext) app = router.APIRouter(manager) request = webob.Request.blank("/bounce/%s" % test_id) response = request.get_response(app) return response.body def test_id_with_xml_format(self): result = self._bounce_id('foo.xml') self.assertEqual('foo', 
result.decode('UTF-8')) def test_id_with_json_format(self): result = self._bounce_id('foo.json') self.assertEqual('foo', result.decode('UTF-8')) def test_id_with_bad_format(self): result = self._bounce_id('foo.bad') self.assertEqual('foo.bad', result.decode('UTF-8')) ================================================ FILE: delfin/tests/unit/api/test_middlewares.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from delfin import context from delfin import test from delfin.api import middlewares from delfin.wsgi import common class TestContextWrapper(test.TestCase): def _get_fake_req(self): env = {} req = common.Request(env) return req def test_context_wrapper(self): context_wrapper = middlewares.ContextWrapper(common.Application()) req = self._get_fake_req() context_wrapper(req) self.assertIsInstance(req.environ['delfin.context'], context.RequestContext) ================================================ FILE: delfin/tests/unit/api/test_wsgi.py ================================================ # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import six import webob import inspect from delfin.api.common import wsgi from delfin import exception from delfin import test from delfin.tests.unit.api import fakes @ddt.ddt class RequestTest(test.TestCase): def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123', method='POST') request.body = six.b("") self.assertIsNone(request.get_content_type()) def test_content_type_unsupported(self): request = wsgi.Request.blank('/tests/123', method='POST') request.headers["Content-Type"] = "text/html" request.body = six.b("asdf
") self.assertRaises(exception.InvalidContentType, request.get_content_type) def test_content_type_with_charset(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/json; charset=UTF-8" result = request.get_content_type() self.assertEqual("application/json", result) def test_content_type_from_accept(self): content_type = 'application/json' request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = content_type result = request.best_match_content_type() self.assertEqual(content_type, result) def test_content_type_from_accept_best(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml, application/json" result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = ("application/json; q=0.3, " "application/xml; q=0.9") result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_from_query_extension(self): request = wsgi.Request.blank('/tests/123.json') result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123.invalid') result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_default(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_cache_and_retrieve_resources(self): request = wsgi.Request.blank('/foo') # Test that trying to retrieve a cached object on # an empty cache fails gracefully self.assertIsNone(request.cached_resource()) self.assertIsNone(request.cached_resource_by_id('r-0')) resources = [{'id': 'r-%s' % x} for x in range(3)] # Cache an empty list of resources using the default name request.cache_resource([]) self.assertEqual({}, request.cached_resource()) self.assertIsNone(request.cached_resource('r-0')) # Cache some resources request.cache_resource(resources[:2]) # Cache one resource request.cache_resource(resources[2]) # Cache a different resource name other_resource = {'id': 'o-0'} request.cache_resource(other_resource, name='other-resource') self.assertEqual(resources[0], request.cached_resource_by_id('r-0')) self.assertEqual(resources[1], request.cached_resource_by_id('r-1')) self.assertEqual(resources[2], request.cached_resource_by_id('r-2')) self.assertIsNone(request.cached_resource_by_id('r-3')) self.assertEqual( {'r-0': resources[0], 'r-1': resources[1], 'r-2': resources[2]}, request.cached_resource()) self.assertEqual( other_resource, request.cached_resource_by_id('o-0', name='other-resource')) @ddt.data( 'share_type', ) def test_cache_and_retrieve_resources_by_resource(self, resource_name): cache_all_func = 'cache_db_%ss' % resource_name cache_one_func = 'cache_db_%s' % resource_name get_db_all_func = 'get_db_%ss' % resource_name get_db_one_func = 'get_db_%s' % resource_name r = wsgi.Request.blank('/foo') amount = 5 res_range = range(amount) resources = [{'id': 'id%s' % x} for x in res_range] # Store 2 getattr(r, cache_all_func)(resources[:amount - 1]) # Store 1 getattr(r, cache_one_func)(resources[amount - 1]) for i in res_range: self.assertEqual( resources[i], getattr(r, get_db_one_func)('id%s' % i), ) self.assertIsNone(getattr(r, get_db_one_func)('id%s' % amount)) self.assertEqual( {'id%s' % i: resources[i] for i in res_range}, getattr(r, 
get_db_all_func)()) class ActionDispatcherTest(test.TestCase): def test_dispatch(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' self.assertEqual('pants', serializer.dispatch({}, action='create')) def test_dispatch_action_None(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' self.assertEqual('trousers', serializer.dispatch({}, action=None)) def test_dispatch_default(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' self.assertEqual('trousers', serializer.dispatch({}, action='update')) class DictSerializerTest(test.TestCase): def test_dispatch_default(self): serializer = wsgi.DictSerializer() self.assertEqual('', serializer.serialize({}, 'update')) class JSONDictSerializerTest(test.TestCase): def test_json(self): input_dict = dict(servers=dict(a=(2, 3))) expected_json = six.b('{"servers":{"a":[2,3]}}') serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace(six.b('\n'), six.b('')).replace(six.b(' '), six.b('')) self.assertEqual(expected_json, result) class TextDeserializerTest(test.TestCase): def test_dispatch_default(self): deserializer = wsgi.TextDeserializer() self.assertEqual({}, deserializer.deserialize({}, 'update')) class JSONDeserializerTest(test.TestCase): def test_json(self): data = """{"a": { "a1": "1", "a2": "2", "bs": ["1", "2", "3", {"c": {"c1": "1"}}], "d": {"e": "1"}, "f": "1"}}""" as_dict = { 'body': { 'a': { 'a1': '1', 'a2': '2', 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], 'd': {'e': '1'}, 'f': '1', }, }, } deserializer = wsgi.JSONDeserializer() self.assertEqual(as_dict, deserializer.deserialize(data)) class ResourceTest(test.TestCase): def test_resource_call(self): class Controller(object): def index(self, req): return 'off' req = webob.Request.blank('/tests') app = fakes.TestRouter(Controller()) response = req.get_response(app) self.assertEqual(six.b('off'), response.body) self.assertEqual(200, response.status_int) def test_resource_not_authorized(self): class Controller(object): def index(self, req): raise exception.NotAuthorized() req = webob.Request.blank('/tests') app = fakes.TestRouter(Controller()) response = req.get_response(app) self.assertEqual(403, response.status_int) def test_dispatch(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'index', None, '') actual = resource.dispatch(method, None, {'pants': 'off'}) expected = 'off' self.assertEqual(expected, actual) def test_get_method_undefined_controller_action(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(AttributeError, resource.get_method, None, 'create', None, '') def test_get_method_action_json(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'action', 'application/json', '{"fooAction": true}') self.assertEqual(controller._action_foo, method) def test_get_method_action_bad_body(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) 
self.assertRaises(exception.MalformedRequestBody, resource.get_method, None, 'action', 'application/json', '{}') def test_get_method_unknown_controller_action(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(KeyError, resource.get_method, None, 'action', 'application/json', '{"barAction": true}') def test_get_method_action_method(self): class Controller(object): def action(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'action', 'application/xml', 'true')
================================================ FILE: … ================================================
STORAGE_MODEL_DATA = """ Enclosure View Storage System Name [dx100-test] Model Upgrade Status [Not Upgraded] Model Name [ET103ACU] Serial Number [4601620378] Device Identification Number [280A7D] Status [Normal] Cache Mode [Write Back Mode] Remote Support [Not yet Set] Operation Mode [Normal] CLI Connecting Controller Module [CM#0] Firmware Version [V10L50-9003] Controller Enclosure (2.5") [Normal (Inside unused parts)] CLI> """ STORAGE_STATUS_DATA = """ Summary Status [Normal] CLI> """ NODE_DATAS = """ CM#0 Information CPU Status/Status Code [Normal / 0xE001] Memory Size [4.0GB] Parts Number [CA07662-D111] Serial Number [WK16201983] Hard Revision [AA] CPU Clock [1.40GHz] CM Active EC [EC#1] CM Next EC [EC#1] BIOS Active EC [EC#1] BIOS Next EC [EC#1] CM EXP Active EC [EC#1] CM EXP Next EC [EC#1] CM#0 Internal Parts Status/Status Code Memory#0 [Normal / 0xE001] Memory#0 Parts Number [18KDF51272PZ-1G6K1] Memory#0 Serial Number [1612121A68F5] Memory#0 Hard Revision [0F4B31] Memory#1 [Undefined / 0x0000] Memory#1 Parts Number [] Memory#1 Serial Number [] Memory#1 Hard Revision [] BUD [Normal / 0xE001] BUD Parts Number [TOSHIBA THNSNJ12] BUD Serial Number [56DS10ABTNWV ] BUD Hard Revision [JYFA0101] Port#0 [Unconnected / 0xC000] (Error Code : 0x0000) Port#1 [Unconnected / 0xC000] (Error Code : 0x0000) Port#2 [Undefined / 0x0000] Port#3 [Undefined / 0x0000] DMA Port#0 [Normal / 0xE001] DMA Port#1 [Undefined / 0x0000] BIOS#0 [Normal / 0xE001] BIOS#1 [Normal / 0xE001] CM EXP [Normal / 0xE001] CM EXP InPort#0 [Normal / 0xE001] CM EXP InPort#1 [Normal / 0xE001] SAS Cable#0(OUT) [Undefined / 0x0000] SAS Cable#1(OUT) [Undefined / 0x0000] CM RTC [Normal / 0xE001] CM NVRAM [Normal / 0xE001] CM FPGA [Normal / 0xE001] CM LAN Port#0 [Normal / 0xE001] CM LAN Port#1 [Normal / 0xE001] CM LAN Port#2 [Undefined / 0x0000] DI#0 Port#0 [Normal / 0xE001] DI#0 Port#1 [Normal / 0xE001] DI#1 Port#0 [Undefined / 0x0000] DI#1 Port#1 [Undefined / 0x0000] SATA SSD Controller Information Status/Status Code [Normal / 0xE001] Active EC [EC#1] Next EC [EC#1] Firmware Version [V03L04-0000] SCU [Normal / 0xE001] SCU Voltage [11.16V] CM#0 CA#0 Port#0 Information Port Type [FC] Port Mode [CA] Status/Status Code [Unconnected / 0xC000] (Error Code : 0x0000) CA Active EC [EC#0] CA Next EC [EC#0] Connection [Loop] Loop ID [0x00] Transfer Rate [Auto Negotiation] Link Status [Unknown] Port WWN [500000E0DA0A7D20] Node WWN [500000E0DA0A7D40] Host Affinity [Disable] Host Response [0] SFP Type [Unmount] SFP Information Present Warning(Low/High) Alarm(Low/High) Temperature [-] [-/-] [-/-] Voltage [-] [-/-] [-/-] Current [-] [-/-] [-/-] TX Power [-] [-/-] [-/-] RX Power [-] [-/-] [-/-] CM#0 CA#0 Port#1 Information Port Type [FC] Port Mode [CA] Status/Status Code [Unconnected / 0xC000] (Error Code : 0x0000) CA Active EC [EC#0] CA Next EC [EC#0]
Connection [Loop] Loop ID [0x00] Transfer Rate [Auto Negotiation] Link Status [Unknown] Port WWN [500000E0DA0A7D21] Node WWN [500000E0DA0A7D40] Host Affinity [Disable] Host Response [0] SFP Type [Unmount] SFP Information Present Warning(Low/High) Alarm(Low/High) Temperature [-] [-/-] [-/-] Voltage [-] [-/-] [-/-] Current [-] [-/-] [-/-] TX Power [-] [-/-] [-/-] RX Power [-] [-/-] [-/-] CM#1 Information CPU Status/Status Code [Normal / 0xE001] Memory Size [4.0GB] Parts Number [CA07662-D111] Serial Number [WK16201958] Hard Revision [AA] CPU Clock [1.40GHz] CM Active EC [EC#1] CM Next EC [EC#1] BIOS Active EC [EC#1] BIOS Next EC [EC#1] CM EXP Active EC [EC#1] CM EXP Next EC [EC#1] CM#1 Internal Parts Status/Status Code Memory#0 [Normal / 0xE001] Memory#0 Parts Number [18KDF51272PZ-1G6K1] Memory#0 Serial Number [1612121A6900] Memory#0 Hard Revision [0F4B31] Memory#1 [Undefined / 0x0000] Memory#1 Parts Number [] Memory#1 Serial Number [] Memory#1 Hard Revision [] BUD [Normal / 0xE001] BUD Parts Number [TOSHIBA THNSNJ12] BUD Serial Number [56DS1086TNWV ] BUD Hard Revision [JYFA0101] Port#0 [Unconnected / 0xC000] (Error Code : 0x0000) Port#1 [Unconnected / 0xC000] (Error Code : 0x0000) Port#2 [Undefined / 0x0000] Port#3 [Undefined / 0x0000] DMA Port#0 [Normal / 0xE001] DMA Port#1 [Undefined / 0x0000] BIOS#0 [Normal / 0xE001] BIOS#1 [Normal / 0xE001] CM EXP [Normal / 0xE001] CM EXP InPort#0 [Normal / 0xE001] CM EXP InPort#1 [Normal / 0xE001] SAS Cable#0(OUT) [Undefined / 0x0000] SAS Cable#1(OUT) [Undefined / 0x0000] CM RTC [Normal / 0xE001] CM NVRAM [Normal / 0xE001] CM FPGA [Normal / 0xE001] CM LAN Port#0 [Normal / 0xE001] CM LAN Port#1 [Normal / 0xE001] CM LAN Port#2 [Undefined / 0x0000] DI#0 Port#0 [Normal / 0xE001] DI#0 Port#1 [Normal / 0xE001] DI#1 Port#0 [Undefined / 0x0000] DI#1 Port#1 [Undefined / 0x0000] SATA SSD Controller Information Status/Status Code [Normal / 0xE001] Active EC [EC#1] Next EC [EC#1] Firmware Version [V03L04-0000] SCU [Normal / 0xE001] SCU Voltage [11.16V] CM#1 CA#0 Port#0 Information Port Type [FC] Port Mode [CA] Status/Status Code [Unconnected / 0xC000] (Error Code : 0x0000) CA Active EC [EC#0] CA Next EC [EC#0] Connection [Loop] Loop ID [0x00] Transfer Rate [Auto Negotiation] Link Status [Unknown] Port WWN [500000E0DA0A7D30] Node WWN [500000E0DA0A7D40] Host Affinity [Disable] Host Response [0] SFP Type [Unmount] SFP Information Present Warning(Low/High) Alarm(Low/High) Temperature [-] [-/-] [-/-] Voltage [-] [-/-] [-/-] Current [-] [-/-] [-/-] TX Power [-] [-/-] [-/-] RX Power [-] [-/-] [-/-] CM#1 CA#0 Port#1 Information Port Type [FC] Port Mode [CA] Status/Status Code [Unconnected / 0xC000] (Error Code : 0x0000) CA Active EC [EC#0] CA Next EC [EC#0] Connection [Loop] Loop ID [0x00] Transfer Rate [Auto Negotiation] Link Status [Unknown] Port WWN [500000E0DA0A7D31] Node WWN [500000E0DA0A7D40] Host Affinity [Disable] Host Response [0] SFP Type [Unmount] SFP Information Present Warning(Low/High) Alarm(Low/High) Temperature [-] [-/-] [-/-] Voltage [-] [-/-] [-/-] Current [-] [-/-] [-/-] TX Power [-] [-/-] [-/-] RX Power [-] [-/-] [-/-] CE PSU#0 Information Status/Status Code [Normal / 0xE001] CE PSU#1 Information Status/Status Code [Normal / 0xE001] login as: f.ce Pre-authentication banner message from server: | FUJITSU Storage ETERNUS login is required. [2021-11-30 06:50:01] End of banner message from server f.ce@192.168.1.1's password: Access denied f.ce@192.168.1.1's password: Currently Network Configuration is set to factory default. 
CLI> """ NODE_DATAS_OLD = """your ip address you username is huawei CLI> show fru-ce CM#0 Information Status/Status Code [Normal / 0xE001] Memory Size [1.0GB] Type [FC Model] Parts Number [CA07415-C621] Serial Number [WK13510516] Hardware Revision [AA ] CPU Clock [1.20GHz] Active EC [EC#1] Next EC [EC#1] CM#0 Internal Parts Status/Status Code Memory [Normal / 0xE001] BE Expander [Normal / 0xE001] BE EXP Port#0 [Normal / 0xE001] BE EXP Port#1 [Undefined / 0x0000] BE EXP Port#2 [Normal / 0xE001] DI Port#0 [Normal / 0xE001] DI Port#1 [Normal / 0xE001] FC Port#0 [Normal / 0xE001] FC Port#1 [Normal / 0xE001] SAS Cable#1(OUT) [- / - ] NAND Controller [Normal / 0xE001] Flash ROM [Normal / 0xE001] CM#0 SCU Information Status/Status Code [Normal / 0xE001] Voltage [9.50V] Expires [0-00] CM#0 Port#0 Information Port Mode [CA] Status/Status Code [Normal / 0xE001] Connection [Fabric] Loop ID [-] Transfer Rate [Auto Negotiation] Link Status [4Gbit/s Link Up] WWN [500000E0D0376706] Host Affinity [Enable] Host Response [-] CM#0 Port#1 Information Port Mode [CA] Status/Status Code [Normal / 0xE001] Connection [Loop] Loop ID [-] Transfer Rate [Auto Negotiation] Link Status [Link Down] WWN [500000E0D0376707] Host Affinity [Enable] Host Response [-] CM#1 Information Status/Status Code [Normal / 0xE001] Memory Size [1.0GB] Type [FC Model] Parts Number [CA07415-C621] Serial Number [WK13510538] Hardware Revision [AA ] CPU Clock [1.20GHz] Active EC [EC#1] Next EC [EC#1] CM#1 Internal Parts Status/Status Code Memory [Normal / 0xE001] BE Expander [Normal / 0xE001] BE EXP Port#0 [Normal / 0xE001] BE EXP Port#1 [Undefined / 0x0000] BE EXP Port#2 [Normal / 0xE001] DI Port#0 [Normal / 0xE001] DI Port#1 [Normal / 0xE001] FC Port#0 [Normal / 0xE001] FC Port#1 [Normal / 0xE001] SAS Cable#1(OUT) [- / - ] NAND Controller [Normal / 0xE001] Flash ROM [Normal / 0xE001] CM#1 SCU Information Status/Status Code [Normal / 0xE001] Voltage [9.50V] Expires [0-00] CM#1 Port#0 Information Port Mode [CA] Status/Status Code [Normal / 0xE001] Connection [Loop] Loop ID [-] Transfer Rate [Auto Negotiation] Link Status [Link Down] WWN [500000E0D0376786] Host Affinity [Enable] Host Response [-] CM#1 Port#1 Information Port Mode [CA] Status/Status Code [Normal / 0xE001] Connection [Fabric] Loop ID [-] Transfer Rate [Auto Negotiation] Link Status [4Gbit/s Link Up] WWN [500000E0D0376787] Host Affinity [Enable] Host Response [-] CE PSU#0 Information Status/Status Code [Normal / 0xE001] CE PSU#1 Information Status/Status Code [Normal / 0xE001] CLI>""" NODE_STATUS_DATAS = """ Controller Enclosure Information Location Status Error Code Sensor 1 / Sensor 2 Intake Temp Normal 0x0000 24 (C) / 23 (C) Exhaust Temp Normal 0x0000 40 (C) / 42 (C) Controller Enclosure Status Controller Module Status/Status Code CM#0 [Normal / 0xE001] CM#1 [Normal / 0xE001] Power Supply Unit Status/Status Code PSU#0 [Normal / 0xE001] PSU#1 [Normal / 0xE001] Disk Status CE-Disk#0 [Available ] CE-Disk#1 [Available ] CE-Disk#2 [Available ] CE-Disk#3 [Available ] CE-Disk#4 [Present ] CE-Disk#5 [Available ] CE-Disk#6 [Available ] CE-Disk#7 [Available ] CE-Disk#8 [Available ] CE-Disk#9 [Available ] CLI> """ POOL_DATAS = """ [RAID Group No.],[RAID Group Name,R,M,Status,TotalCapacity(MB),FreeCapacity(MB) 0,pool-1,RAID1+0,CM#0,Available,1118208,1115926 1,pool-2,RAID5,CM#1,Available,1118208,1118208 CLI> """ POOL_OLD_DATAS = """your ip address you username is huawei CLI> show raid-groups RAID Group RAID Assigned Status\ Total Free No. 
Name Level CM Capacity(MB)\ Capacity(MB) 0 JJ RAID0 CM#0 Broken 1676288\ 1358848 CLI> """ POOL_ERROR_DATAS = """ ^ Error: Ambiguous command CLI>""" VOLUME_TPV_DATAS \ = """Volume Status RG or TPP or FTRP TFOG Size(MB) Copy Allocation Used Me No. Name No. Name No. Name Protection Status (%) Level Capacity(MB) ----- ------ ---- ---- ------- --- ------ --- ---- --- --- --- -- --- ----- 1 volume-wsv Available 0 thin-1 - - 200 Disable Thick Normal - 80 High 200 4 voo-1 Available 0 thin-1 - - 500 Disable Thin Normal >500 80 High 0 CLI> """ VOLUME_FTV_DATAS \ = """Error: E0331 Flexible tier mode is not valid. [0305-0505] -type ftv CLI> """ VOLUME_DATAS = """your ip address you username is huawei CLI> show raid-groups Volume Status Type\ Expansion RAID Group Size(MB) Reserved No. Name (Concatenation) No.\ Name Deletion 0 OVM_Repo0 Broken Open - 0\ JJ 51200 1 OVM_Repo1 Broken Open - 0\ JJ 51200 2 OVM_raw Broken Open - 0\ JJ 10240 3 OVM_Repo2 Broken Open - 0\ JJ 204800 CLI>""" VOLUMES_ERROR = """ ^ CLI>""" VOLUMES = """CLI> show volumes -mode uid Volume Status Type\ RG or TPP or FTRP TFOG Size(MB) UID No. Name \ No. Name No. Name ID\ Mode ----- -------------------------------- ------------------------- ----------\ ------- ---- ---------------- --- ---------------- --------- ---------------\ ----------------- ------- 0 LUN00 Available TPV \ 0 Pool0 - - 20480\ 600000E00D29000000291B6B00000000 Default 1 LUN01 Available TPV \ 0 Pool0 - - 20480\ 600000E00D29000000291B6B00010000 Default 2 LUN02 Available TPV\ 0 Pool0 - - 20480\ 600000E00D29000000291B6B00020000 Default 3 LUN03 Available TPV\ 0 Pool0 - - 20480\ 600000E00D29000000291B6B00030000 Default 4 LUN04 Available TPV\ 0 Pool0 - - 20480\ 600000E00D29000000291B6B00040000 Default CLI>""" STORAGE_RESULT = { 'name': 'dx100-test', 'vendor': 'FUJITSU', 'description': 'test dx100-test', 'model': 'ET103ACU', 'status': 'normal', 'serial_number': '4601620378', 'firmware_version': 'V10L50-9003', 'location': 'test location', 'raw_capacity': 6657199308800.0, 'total_capacity': 2345052143616, 'used_capacity': 2392850432, 'free_capacity': 2342659293184 } CONTROLLER_RESULT = [ { 'name': 'CM#0', 'storage_id': '12345', 'native_controller_id': 'WK16201983', 'status': 'normal', 'location': 'CM#0', 'soft_version': 'AA', 'cpu_info': '1.40GHz', 'cpu_count': 1, 'memory_size': '4294967296' }] POOL_RESULT = [ { 'name': 'pool-1', 'storage_id': '12345', 'native_storage_pool_id': '0', 'status': 'normal', 'storage_type': 'block', 'total_capacity': 1172526071808, 'used_capacity': 2392850432, 'free_capacity': 1170133221376 }] POOL_old_RESULT = [ { 'name': 'JJ', 'storage_id': '12345', 'native_storage_pool_id': '0', 'status': 'abnormal', 'storage_type': 'block', 'total_capacity': 1757715365888, 'used_capacity': 332859965440, 'free_capacity': 1424855400448 }] VOLUME_RESULT = [ { 'name': 'LUN00', 'storage_id': '12345', 'status': 'normal', 'native_volume_id': '0', 'native_storage_pool_id': '0', 'type': 'thick', 'wwn': '600000E00D29000000291B6B00000000', 'total_capacity': 21474836480, 'used_capacity': 0, 'free_capacity': 21474836480 }] VOLUME_OLD_RESULT = [ { 'name': 'OVM_Repo0', 'storage_id': '12345', 'status': 'abnormal', 'native_volume_id': '0', 'native_storage_pool_id': '0', 'type': 'thick', 'total_capacity': 53687091200, 'used_capacity': 0, 'free_capacity': 0 }] LIST_ALERT_ERROR = """your ip address you username is huawei CLI> show raid-groups 2021-08-19 02:33:08 Error P 85400008 SS\ D 2.5 DE#00-Slot#8(SAS 400GB) Fault (DE) 2021-08-19 02:33:08 Error P 85400007 SSD 2.5 DE#00-Slot#7(\ 
SAS 400GB) Fault (DE) 2021-08-19 02:33:08 Error P 85400006 SSD 2.5 DE#00-Slot#6(\ SAS 400GB) Fault (DE) 2021-08-19 02:33:08 Error P 85400005 SSD 2.5 DE#00-Slot#5(\ SAS 400GB) Fault (DE) 2021-08-19 02:33:08 Error P 85400004 SSD 2.5 DE#00-Slot#4(\ SAS 400GB) Fault (DE) 2021-08-19 02:33:08 Error P 85400003 SSD 2.5 DE#00-Slot#3(\ SAS 400GB) Fault (DE) 2021-08-19 02:33:08 Error P 85400002 SSD 2.5 DE#00-Slot#2(\ SAS 400GB) Fault (DE) 2021-08-19 02:33:08 Error P 85400001 SSD 2.5 DE#00-Slot#1(\ SAS 400GB) Fault (DE) 2021-08-19 02:33:08 Error P 85400000 SSD 2.5 DE#00-Slot#0(\ SAS 400GB) Fault (DE) CLI>""" LIST_ALERT_WARNING = """your ip address you username is huawei CLI> show raid-groups 2021-08-19 02:33:08 Warning P 85400008 SSD\ Fault (DE) 2021-08-19 02:33:08 Warning P 85400007 SSD 2.5 Fault (DE) <\ HUSMM1640ASS204 0QWAHN1A H603 15299 A1> 2021-08-19 02:33:08 Warning P 85400006 SSD 2.5 Fault (DE) <\ HUSMM1640ASS204 0QWA9GSA H603 15299 A1> 2021-08-19 02:33:08 Warning P 85400005 SSD 2.5 Fault (DE) <\ HUSMM1640ASS204 0QWA91YA H603 15299 A1> 2021-08-19 02:33:08 Warning P 85400004 SSD 2.5 Fault (DE) <\ HUSMM1640ASS204 0QWA9HMA H603 15299 A1> 2021-08-19 02:33:08 Warning P 85400003 SSD 2.5 Fault (DE) <\ HUSMM1640ASS204 0QWA908A H603 15299 A1> 2021-08-19 02:33:08 Warning P 85400002 SSD 2.5 DE#00-Fault (DE) <\ HUSMM1640ASS204 0QWAHMAA H603 15299 A1> 2021-08-19 02:33:08 Warning P 85400001 SSD 2.5 DE#00-S Fault (DE) <\ HUSMM1640ASS204 0QWA9KJA H603 15299 A1> 2021-08-19 02:33:08 Warning P 85400000 SSD 2.5 DE#00- Fault (DE) <\ HUSMM1640ASS204 0QWA9GMA H603 15299 A1> CLI>""" ALERTS_INFO = { 'alert_id': '85400008', 'severity': 'Warning', 'category': 'Fault', 'description': 'SSDFault (DE) ', 'type': 'EquipmentAlarm', 'resource_type': 'Storage', 'alert_name': 'SSDFault (DE) ', 'occur_time': 1629311588000, 'match_key': '1809bdfa672e8b10ec9ec499a54dcd83' } DISK_LIST_INFO = """Controller Enclosure Disk #0 Information Location [CE-Disk#0] Status [Present] (Error Code : 0x0000) Size [400GB] Type [2.5 SSD-M] Speed [-] Usage [Data] Health [100%] RAID Group [-] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [HGST] Product ID [HUSMM1640ASS204] Serial Number [0QWA91YA] WWN [5000CCA04E4B14F3] Firmware Revision [H603] Total completed passes [0Cycles] Progress with current pass [4%] Completed passes since last Power On [0Cycles] Controller Enclosure Disk #1 Information Location [CE-Disk#1] Status [Present] (Error Code : 0x0000) Size [400GB] Type [2.5 SSD-M] Speed [-] Usage [Data] Health [100%] RAID Group [-] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [HGST] Product ID [HUSMM1640ASS204] Serial Number [0QWAHN1A] WWN [5000CCA04E4B77CF] Firmware Revision [H603] Total completed passes [0Cycles] Progress with current pass [4%] Completed passes since last Power On [0Cycles] Controller Enclosure Disk #2 Information Location [CE-Disk#2] Status [Present] (Error Code : 0x0000) Size [400GB] Type [2.5 SSD-M] Speed [-] Usage [Data] Health [100%] RAID Group [-] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [HGST] Product ID [HUSMM1640ASS204] Serial Number [0QWA9GMA] WWN [5000CCA04E4B1B17] Firmware Revision [H603] Total completed passes [0Cycles] Progress with current pass [4%] Completed passes since last Power On [0Cycles] Controller Enclosure Disk #3 Information Location [CE-Disk#3] Status [Present] (Error Code : 0x0000) Size [400GB] Type [2.5 SSD-M] Speed [-] Usage [Data] Health [100%] RAID Group [-] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [HGST] Product ID 
[HUSMM1640ASS204] Serial Number [0QWA9KJA] WWN [5000CCA04E4B1C7F] Firmware Revision [H603] Total completed passes [0Cycles] Progress with current pass [4%] Completed passes since last Power On [0Cycles] Controller Enclosure Disk #4 Information Location [CE-Disk#4] Status [Present] (Error Code : 0x0000) Size [400GB] Type [2.5 SSD-M] Speed [-] Usage [Data] Health [100%] RAID Group [-] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [HGST] Product ID [HUSMM1640ASS204] Serial Number [0QWAHMAA] WWN [5000CCA04E4B7777] Firmware Revision [H603] Total completed passes [0Cycles] Progress with current pass [4%] Completed passes since last Power On [0Cycles] Controller Enclosure Disk #5 Information Location [CE-Disk#5] Status [Present] (Error Code : 0x0000) Size [600GB] Type [2.5 Online] Speed [15000rpm] Usage [Data] Health [-] RAID Group [-] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [SEAGATE] Product ID [ST600MP0005] Serial Number [S7M1LC92] WWN [5000C50098FA0A04] Firmware Revision [VE0C] Total completed passes [0Cycles] Progress with current pass [3%] Completed passes since last Power On [0Cycles] Controller Enclosure Disk #6 Information Location [CE-Disk#6] Status [Present] (Error Code : 0x0000) Size [600GB] Type [2.5 Online] Speed [15000rpm] Usage [Data] Health [-] RAID Group [-] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [SEAGATE] Product ID [ST600MP0005] Serial Number [W7M0M8PR] WWN [5000C500A0FA7844] Firmware Revision [VE0C] Total completed passes [0Cycles] Progress with current pass [3%] Completed passes since last Power On [0Cycles] Controller Enclosure Disk #7 Information Location [CE-Disk#7] Status [Present] (Error Code : 0x0000) Size [600GB] Type [2.5 Online] Speed [15000rpm] Usage [Data] Health [-] RAID Group [-] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [SEAGATE] Product ID [ST600MP0005] Serial Number [S7M1LC99] WWN [5000C50098FA09DC] Firmware Revision [VE0C] Total completed passes [0Cycles] Progress with current pass [3%] Completed passes since last Power On [0Cycles] Controller Enclosure Disk #8 Information Location [CE-Disk#8] Status [Present] (Error Code : 0x0000) Size [600GB] Type [2.5 Online] Speed [15000rpm] Usage [Data] Health [-] RAID Group [-] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [SEAGATE] Product ID [ST600MP0005] Serial Number [S7M1L3XD] WWN [5000C50098EE374C] Firmware Revision [VE0C] Total completed passes [0Cycles] Progress with current pass [3%] Completed passes since last Power On [0Cycles] Controller Enclosure Disk #9 Information Location [CE-Disk#9] Status [Present] (Error Code : 0x0000) Size [600GB] Type [2.5 Online] Speed [15000rpm] Usage [Data] Health [-] RAID Group [ 0 : pool-1] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [SEAGATE] Product ID [ST600MP0005] Serial Number [S7M1KXS5] WWN [5000C50098F06184] Firmware Revision [VE0C] Total completed passes [0Cycles] Progress with current pass [3%] Completed passes since last Power On [0Cycles] Controller Enclosure Disk #10 Information Location [CE-Disk#10] Status [Present] (Error Code : 0x0000) Size [600GB] Type [2.5 Online] Speed [15000rpm] Usage [Data] Health [-] RAID Group [-] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [SEAGATE] Product ID [ST600MP0005] Serial Number [S7M1KCPD] WWN [5000C50098DB1E50] Firmware Revision [VE0C] Total completed passes [0Cycles] Progress with current pass [3%] Completed passes since last Power On [0Cycles] Controller Enclosure Disk #11 Information Location 
[CE-Disk#11] Status [Present] (Error Code : 0x0000) Size [600GB] Type [2.5 Online] Speed [15000rpm] Usage [Data] Health [-] RAID Group [-] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [SEAGATE] Product ID [ST600MP0005] Serial Number [W7M0MYYA] WWN [5000C500A0F7C4D0] Firmware Revision [VE0C] Total completed passes [0Cycles] Progress with current pass [3%] Completed passes since last Power On [0Cycles] CLI>""" DISK_OLD = """your ip address you username is huawei CLI> show disks -disks all Controller Enclosure Disk #0 Information Location [CE-Disk#0] Status [Failed Usable] (Error Code : 0x0001) Size [450GB] Type [3.5" SAS] Speed [15000rpm] Usage [System] RAID Group [ 0 : JJ] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [SEAGATE] Product ID [ST3450857SS] Serial Number [6SK2CEG91327] WWN [5000C5006BC80184] Firmware Revision [GF0D] \r Controller Enclosure Disk #1 Information Location [CE-Disk#1] Status [Failed Usable] (Error Code : 0x0009) Size [450GB] Type [3.5" SAS] Speed [15000rpm] Usage [System] RAID Group [ 0 : JJ] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [SEAGATE] Product ID [ST3450857SS] Serial Number [6SK262SZ1312] WWN [5000C5006806E318] Firmware Revision [GF0D] \r Controller Enclosure Disk #2 Information Location [CE-Disk#2] Status [Available] Size [450GB] Type [3.5" SAS] Speed [15000rpm] Usage [Data] RAID Group [ 0 : JJ] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [SEAGATE] Product ID [ST3450857SS] Serial Number [6SK26QCA1312] WWN [5000C5006810B6AC] Firmware Revision [GF0D] \r Controller Enclosure Disk #3 Information Location [CE-Disk#3] Status [Available] Size [450GB] Type [3.5" SAS] Speed [15000rpm] Usage [Data] RAID Group [ 0 : JJ] Motor Status [Active] Rebuild/Copyback Progress [-] Vendor ID [SEAGATE] Product ID [ST3450857SS] Serial Number [6SK2DE941330] WWN [5000C5006C3E26FC] Firmware Revision [GF0D] CLI>""" PORT_LIST_INFO = """ Port CM#0 CA#0 Port#0 CM#0 CA#0 Port#1 Port Mode CA CA Connection FC-AL FC-AL Loop ID Assign Manual(0x00) Manual(0x00) Transfer Rate 8 Gbit/s Auto Negotiation Frame Size 2048 bytes 2048 bytes Host Affinity Disable Disable Host Response No. 0 0 Host Response Name Default Default Reset Scope I_T_L I_T_L Reserve Cancel at Chip Reset Disable Disable REC Line No. - - REC Transfer Mode Sync - - REC Transfer Mode Stack - - REC Transfer Mode Consistency - - REC Transfer Mode Through - - TFO Transfer Mode - - WWN Mode Custom Custom WWPN 500000E0DA0A7D20 500000E0DA0A7D21 Port CM#1 CA#0 Port#0 CM#1 CA#0 Port#1 Port Mode CA CA Connection FC-AL FC-AL Loop ID Assign Manual(0x00) Manual(0x00) Transfer Rate Auto Negotiation Auto Negotiation Frame Size 2048 bytes 2048 bytes Host Affinity Disable Disable Host Response No. 0 0 Host Response Name Default Default Reset Scope I_T_L I_T_L Reserve Cancel at Chip Reset Disable Disable REC Line No. - - REC Transfer Mode Sync - - REC Transfer Mode Stack - - REC Transfer Mode Consistency - - REC Transfer Mode Through - - TFO Transfer Mode - - WWN Mode Custom Custom WWPN 500000E0DA0A7D30 500000E0DA0A7D31 CLI>""" FCOE_INFO = """Port CM#0 CA#0 Port#0 CM#0\ CA#0 Port#1 Port Mode CA RA Transfer Rate 10Gbit/s 10Gbit/s Frame Size 2048bytes 2048bytes Host Affinity Enable Enable Host Response No. 
1 2 Host Response Name HP01 HP02 Reset Scope I_T_L I_T_L Reserve Cancel at Chip Reset Disable - FCF VLAN ID Disable Disable FCF Fabric Name Disable Disable MAC Address 01:02:03:04:05:06 01:02:03:04:05:07 Port CM#0 CA#1 Port#0 CM#0 CA#1 Port#1 Port Mode CA RA Transfer Rate 10Gbit/s 10Gbit/s Frame Size 2048bytes 2048bytes Host Affinity Enable Enable Host Response No. 1 2 Host Response Name HP01 HP02 Reset Scope I_T_L I_T_L Reserve Cancel at Chip Reset Disable - FCF VLAN ID Disable Disable FCF Fabric Name Disable Disable MAC Address 01:02:03:06:05:06 01:02:03:06:05:07 CLI>""" FC_INFO_OLD = """your ip address you username is huawei CLI> show fc-parameters Port CM#0 Port#0 CM#0 Port#1\ CM#1 Port#0 CM#1 Port#1 Port Mode CA CA CA\ CA Connection Fabric FC-AL FC-AL\ Fabric Loop ID Assign - Auto(Ascending)\ Auto(Ascending) - Transfer Rate Auto Negotiation Auto Negotiation\ Auto Negotiation Auto Negotiation Frame Size 2048 bytes 2048 bytes\ 2048 bytes 2048 bytes Host Affinity Enable Enable Enable\ Enable Host Response No. - - -\ - Host Response Name - - -\ - Reset Scope I_T_L I_T_L I_T_L\ I_T_L Reserve Cancel at Chip Reset Enable Enable Enable\ Enable\ CLI>""" HOST_STATUS_INFO = """CLI> show host-path-state Port Host Path State No. Name --------------------- ---- ---------------- ---------- CM#0 CA#0 Port#0 0 dbs01_0 Online CM#0 CA#0 Port#0 1 dbs01_1 Online CM#0 CA#0 Port#1 1 dbs01_1 Online CM#0 CA#0 Port#1 2 dbs02_0 Online CM#1 CA#0 Port#0 0 dbs01_0 Online CM#1 CA#0 Port#0 1 dbs01_1 Online CM#1 CA#0 Port#0 3 dbs02_1 Online CM#1 CA#0 Port#1 7 h_g_1_0 Online CLI>""" FC_HOSTS_INFO = """CLI> show host-wwn-names Host WWN Host Response No. Name No. Name ---- ---------------- ---------------- --- ---------------- 0 dbs01_0 10000090faec8449 0 Default 1 dbs01_1 10000090faec84a7 0 Default 2 dbs02_0 10000090faec852a 0 Default 3 dbs02_1 10000090faec842d 0 Default 4 dbs03_0 10000090faec7f2f 0 Default 5 dbs03_1 10000090faec7f06 0 Default 7 h_g_1_0 12ac13ab15af21ae 252 AIX CLI>""" ISCSI_HOST_INFO = """CLI> show host-iscsi-names Host Host Response IP Address\ iSCSI Name CmdSN Count No. Name No. Name ---- ---------------- --- ---------------- ---------------------------------\ ------ -------------------------------- ----------- 0 iscsi_host_0 252 AIX 126.0.0.2\ iqn.2006-08.com.huawei:21004447d Unlimited cca426::0 1 iscsi_host-1_0 252 AIX 126.0.0.3\ iqn.2006-08.com.huawei:21004447d Unlimited cca426::1 2 iscsi_1_0 0 Default *(IPv6)\ iqn.2007-08.com.huawei:21004447d Unlimited cca426::7\ CLI>""" ISCSI_HOST_DETAIL_ZERO = """CLI> show host-iscsi-names -host-number 0 Host No. 0 Host Name iscsi_host_0 iSCSI Name iqn.2006-08.com.huawei:21004447dcca426::0 Alias Name iscsi 230 25 IP Address 126.0.0.2 Chap User Name Host Response No. 252 Host Response Name AIX CmdSN Count Unlimited CLI>""" ISCSI_HOST_DETAIL_ONE = """CLI> show host-iscsi-names -host-number 1 Host No. 1 Host Name iscsi_host-1_0 iSCSI Name iqn.2006-08.com.huawei:21004447dcca426::1 Alias Name iscsi1 IP Address 126.0.0.3 Chap User Name Host Response No. 252 Host Response Name AIX CmdSN Count Unlimited CLI>""" ISCSI_HOST_DETAIL_TWO = """CLI> show host-iscsi-names -host-number 2 Host No. 2 Host Name iscsi_1_0 iSCSI Name iqn.2007-08.com.huawei:21004447dcca426::7 Alias Name IP Address *(IPv6) Chap User Name Host Response No. 0 Host Response Name Default CmdSN Count Unlimited CLI>""" SAS_HOST_INFO = """CLI> show host-sas-addresses Host SAS Address Host Response No. Name No. 
Name ---- ---------------- ---------------- --- ---------------- 6 sas_g_0_0 12ab13ac14ad15af 253 AIX VxVM 8 sas2_0 14ab13ac46ae20af 0 Default CLI>""" INITIATORS_DATA = [ {'name': '10000090faec8449', 'storage_id': '12345', 'native_storage_host_initiator_id': '10000090faec8449', 'wwn': '10000090faec8449', 'status': 'online', 'native_storage_host_id': 'dbs01_0', 'type': 'fc'}, {'name': '10000090faec84a7', 'storage_id': '12345', 'native_storage_host_initiator_id': '10000090faec84a7', 'wwn': '10000090faec84a7', 'status': 'online', 'native_storage_host_id': 'dbs01_1', 'type': 'fc'}, {'name': '10000090faec852a', 'storage_id': '12345', 'native_storage_host_initiator_id': '10000090faec852a', 'wwn': '10000090faec852a', 'status': 'online', 'native_storage_host_id': 'dbs02_0', 'type': 'fc'}, {'name': '10000090faec842d', 'storage_id': '12345', 'native_storage_host_initiator_id': '10000090faec842d', 'wwn': '10000090faec842d', 'status': 'online', 'native_storage_host_id': 'dbs02_1', 'type': 'fc'}, {'name': '10000090faec7f2f', 'storage_id': '12345', 'native_storage_host_initiator_id': '10000090faec7f2f', 'wwn': '10000090faec7f2f', 'status': 'offline', 'native_storage_host_id': 'dbs03_0', 'type': 'fc'}, {'name': '10000090faec7f06', 'storage_id': '12345', 'native_storage_host_initiator_id': '10000090faec7f06', 'wwn': '10000090faec7f06', 'status': 'offline', 'native_storage_host_id': 'dbs03_1', 'type': 'fc'}, {'name': '12ac13ab15af21ae', 'storage_id': '12345', 'native_storage_host_initiator_id': '12ac13ab15af21ae', 'wwn': '12ac13ab15af21ae', 'status': 'online', 'native_storage_host_id': 'h_g_1_0', 'type': 'fc'}, {'name': 'iqn.2006-08.com.huawei:21004447dcca426::0', 'storage_id': '12345', 'native_storage_host_initiator_id': 'iqn.2006-08.com.huawei:21004447dcca426::0', 'wwn': 'iqn.2006-08.com.huawei:21004447dcca426::0', 'status': 'offline', 'native_storage_host_id': 'iscsi_host_0', 'type': 'iscsi', 'alias': 'iscsi 230 25'}, {'name': 'iqn.2006-08.com.huawei:21004447dcca426::1', 'storage_id': '12345', 'native_storage_host_initiator_id': 'iqn.2006-08.com.huawei:21004447dcca426::1', 'wwn': 'iqn.2006-08.com.huawei:21004447dcca426::1', 'status': 'offline', 'native_storage_host_id': 'iscsi_host-1_0', 'type': 'iscsi', 'alias': 'iscsi1'}, {'name': 'iqn.2007-08.com.huawei:21004447dcca426::7', 'storage_id': '12345', 'native_storage_host_initiator_id': 'iqn.2007-08.com.huawei:21004447dcca426::7', 'wwn': 'iqn.2007-08.com.huawei:21004447dcca426::7', 'status': 'offline', 'native_storage_host_id': 'iscsi_1_0', 'type': 'iscsi', 'alias': None}, {'name': '12ab13ac14ad15af', 'storage_id': '12345', 'native_storage_host_initiator_id': '12ab13ac14ad15af', 'wwn': '12ab13ac14ad15af', 'status': 'offline', 'native_storage_host_id': 'sas_g_0_0', 'type': 'sas'}, {'name': '14ab13ac46ae20af', 'storage_id': '12345', 'native_storage_host_initiator_id': '14ab13ac46ae20af', 'wwn': '14ab13ac46ae20af', 'status': 'offline', 'native_storage_host_id': 'sas2_0', 'type': 'sas'}] HOSTS_DATA = [ {'name': 'dbs01_0', 'storage_id': '12345', 'native_storage_host_id': 'dbs01_0', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'dbs01_1', 'storage_id': '12345', 'native_storage_host_id': 'dbs01_1', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'dbs02_0', 'storage_id': '12345', 'native_storage_host_id': 'dbs02_0', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'dbs02_1', 'storage_id': '12345', 'native_storage_host_id': 'dbs02_1', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'dbs03_0', 'storage_id': '12345', 'native_storage_host_id': 
'dbs03_0', 'os_type': 'Unknown', 'status': 'offline'}, {'name': 'dbs03_1', 'storage_id': '12345', 'native_storage_host_id': 'dbs03_1', 'os_type': 'Unknown', 'status': 'offline'}, {'name': 'h_g_1_0', 'storage_id': '12345', 'native_storage_host_id': 'h_g_1_0', 'os_type': 'AIX', 'status': 'normal'}, {'name': 'iscsi_host_0', 'storage_id': '12345', 'native_storage_host_id': 'iscsi_host_0', 'os_type': 'AIX', 'status': 'offline', 'ip_address': '126.0.0.2'}, {'name': 'iscsi_host-1_0', 'storage_id': '12345', 'native_storage_host_id': 'iscsi_host-1_0', 'os_type': 'AIX', 'status': 'offline', 'ip_address': '126.0.0.3'}, {'name': 'iscsi_1_0', 'storage_id': '12345', 'native_storage_host_id': 'iscsi_1_0', 'os_type': 'Unknown', 'status': 'offline', 'ip_address': None}, {'name': 'sas_g_0_0', 'storage_id': '12345', 'native_storage_host_id': 'sas_g_0_0', 'os_type': 'AIX', 'status': 'offline'}, {'name': 'sas2_0', 'storage_id': '12345', 'native_storage_host_id': 'sas2_0', 'os_type': 'Unknown', 'status': 'offline'}] HOST_GROUPS_INFO = """CLI> show host-groups -all Host Group Host Response Host Type No. Name No. Name ---- ---------------- --- ---------------- ---------- 0 dbs01 0 Default FC/FCoE Host WWN No. Name ---- ---------------- ---------------------------------------- 0 dbs01_0 10000090faec8449 1 dbs01_1 10000090faec84a7 Host Group Host Response Host Type No. Name No. Name ---- ---------------- --- ---------------- ---------- 1 dbs02 0 Default FC/FCoE Host WWN No. Name ---- ---------------- ---------------------------------------- 2 dbs02_0 10000090faec852a 3 dbs02_1 10000090faec842d Host Group Host Response Host Type No. Name No. Name ---- ---------------- --- ---------------- ---------- 2 dbs03 0 Default FC/FCoE Host WWN No. Name ---- ---------------- ---------------------------------------- 4 dbs03_0 10000090faec7f2f 5 dbs03_1 10000090faec7f06 CLI>""" HOST_GROUPS_DATA = { 'storage_host_groups': [{'name': 'dbs01', 'storage_id': '12345', 'native_storage_host_group_id': '0'}, {'name': 'dbs02', 'storage_id': '12345', 'native_storage_host_group_id': '1'}, {'name': 'dbs03', 'storage_id': '12345', 'native_storage_host_group_id': '2'}], 'storage_host_grp_host_rels': [ {'storage_id': '12345', 'native_storage_host_group_id': '0', 'native_storage_host_id': 'dbs01_0'}, {'storage_id': '12345', 'native_storage_host_group_id': '0', 'native_storage_host_id': 'dbs01_1'}, {'storage_id': '12345', 'native_storage_host_group_id': '1', 'native_storage_host_id': 'dbs02_0'}, {'storage_id': '12345', 'native_storage_host_group_id': '1', 'native_storage_host_id': 'dbs02_1'}, {'storage_id': '12345', 'native_storage_host_group_id': '2', 'native_storage_host_id': 'dbs03_0'}, {'storage_id': '12345', 'native_storage_host_group_id': '2', 'native_storage_host_id': 'dbs03_1'}]} VOLUME_GROUPS_INFO = """CLI> show lun-groups LUN Group LUN Overlap No. Name Volumes ---- ---------------- ----------- 0 dbs01 20 No CLI> """ VOLUME_DETAILS_INFO = """CLI> show lun-groups -lg-number 0 LUN Group No.0 LUN Group Name dbs01 LUN Volume Status Size(MB)\ LUN Overlap UID No. 
Name\ Volume ---- ----- -------------------------------- -------------------------\ --------- ----------- -------------------------------- 0 0 LUN00 Available\ 20480 No 600000E00D29000000291B6B00000000 1 1 LUN01 Available\ 20480 No 600000E00D29000000291B6B00010000 2 2 LUN02 Available\ 20480 No 600000E00D29000000291B6B00020000 CLI> """ VOLUME_GROUPS_DATA = { 'volume_groups': [{'name': 'dbs01 20', 'storage_id': '12345', 'native_volume_group_id': '0'}], 'vol_grp_vol_rels': [ {'storage_id': '12345', 'native_volume_group_id': '0', 'native_volume_id': '0'}, {'storage_id': '12345', 'native_volume_group_id': '0', 'native_volume_id': '1'}, {'storage_id': '12345', 'native_volume_group_id': '0', 'native_volume_id': '2'}]} PORT_G_VIEW_INFO = """CLI> show port-groups -all Port Group CA Type No. Name --- ---------------- ------- 0 PortGroup01 FC CM#0 CA#0 Port#0 CM#1 CA#0 Port#0 Port Group CA Type No. Name --- ---------------- ------- 1 PortGroup02 FC CM#0 CA#0 Port#1 CM#1 CA#0 Port#1 Port Group CA Type No. Name --- ---------------- ------- 2 PortGroup03 FC CM#0 CA#1 Port#0 CM#1 CA#1 Port#0 CLI>""" PORT_G_DATA = { 'port_groups': [{'name': 'PortGroup01', 'storage_id': '12345', 'native_port_group_id': '0'}, {'name': 'PortGroup02', 'storage_id': '12345', 'native_port_group_id': '1'}, {'name': 'PortGroup03', 'storage_id': '12345', 'native_port_group_id': '2'}], 'port_grp_port_rels': [ {'storage_id': '12345', 'native_port_group_id': '0', 'native_port_id': 'CM#0 CA#0 Port#0'}, {'storage_id': '12345', 'native_port_group_id': '0', 'native_port_id': 'CM#1 CA#0 Port#0'}, {'storage_id': '12345', 'native_port_group_id': '1', 'native_port_id': 'CM#0 CA#0 Port#1'}, {'storage_id': '12345', 'native_port_group_id': '1', 'native_port_id': 'CM#1 CA#0 Port#1'}, {'storage_id': '12345', 'native_port_group_id': '2', 'native_port_id': 'CM#0 CA#1 Port#0'}, {'storage_id': '12345', 'native_port_group_id': '2', 'native_port_id': 'CM#1 CA#1 Port#0'}]} MASKING_VIEWS_INFO = """CLI> show host-affinity Port Group Host Group LUN Group LUN Overlap No. Name No. Name No. Name Volumes --- ---------------- --- ---------------- ---- ---------------- ----------- 0 huawie 3 Dorado5000V6 7 test No Port Host No. Name ---------------- ---- ---------------- CM#0 CA#0 Port#1 6 Dorado5000V6_0 CM#0 CA#0 Port#1 7 Dorado5000V6_1 CM#1 CA#0 Port#0 6 Dorado5000V6_0 CM#1 CA#0 Port#0 7 Dorado5000V6_1 Port Group Host Group LUN Group LUN Overlap No. Name No. Name No. Name Volumes --- ---------------- --- ---------------- ---- ---------------- ----------- 0 huawie 10 Dorado5500_V6 9 lun_fujitsu No Port Host No. Name ---------------- ---- ---------------- CM#0 CA#0 Port#1 4 Dorado5500v6_0 CM#0 CA#0 Port#1 5 Dorado5500v6_1 CM#1 CA#0 Port#0 4 Dorado5500v6_0 CM#1 CA#0 Port#0 5 Dorado5500v6_1 Port Group Host Group LUN Group LUN Overlap No. Name No. Name No. Name Volumes --- ---------------- --- ---------------- ---- ---------------- ----------- 0 huawie 12 AIX206 8 new1 No Port Host No. Name ---------------- ---- ---------------- CM#0 CA#0 Port#1 20 AIX206_0 CM#0 CA#0 Port#1 21 AIX206_1 CM#1 CA#0 Port#0 20 AIX206_0 CM#1 CA#0 Port#0 21 AIX206_1 CM#0 CA#0 Port#0 (Host Affinity Mode Enable) Host LUN Group LUN Overlap LUN Mask No. Name No. Name Volumes Group No. ---- ---------------- ---- ---------------- ----------- --------- 1 RH_196_02 1 RH2288_test No - 20 AIX206_0 9 lun_fujitsu No - CM#0 CA#0 Port#1 (Host Affinity Mode Enable) CM#1 CA#0 Port#0 (Host Affinity Mode Enable) Host LUN Group LUN Overlap LUN Mask No. Name No. Name Volumes Group No. 
---- ---------------- ---- ---------------- ----------- --------- 2 RH197_0 5 RH196 Yes - CM#1 CA#0 Port#1 (Host Affinity Mode Disable) CLI>""" GET_MAPPING = """CLI> show mapping CM#0 CA#0 Port#0 (Host Affinity Mode Enable) CM#0 CA#0 Port#1 (Host Affinity Mode Enable) CM#0 CA#1 Port#0 (Host Affinity Mode Enable) CM#0 CA#1 Port#1 (Host Affinity Mode Disable) LUN Volume Status Size(MB) No. Name ---- ----- -------------------------------- ------------------------- --------- 0 3 LUN03 Available 20480 1 6 lun051 Available 2048 CM#1 CA#0 Port#0 (Host Affinity Mode Enable) CM#1 CA#0 Port#1 (Host Affinity Mode Enable) CM#1 CA#1 Port#0 (Host Affinity Mode Enable) CM#1 CA#1 Port#1 (Host Affinity Mode Disable) LUN Volume Status Size(MB) No. Name ---- ----- -------------------------------- ------------------------- --------- 1 5 lun050 Available 2048 CLI>""" MASKING_VIEWS_DATA = [ {'native_masking_view_id': '37host_idvolume_id', 'name': '37host_idvolume_id', 'native_storage_host_group_id': '3', 'native_port_group_id': '0', 'native_volume_group_id': '7', 'storage_id': '12345'}, {'native_masking_view_id': '109host_idvolume_id', 'name': '109host_idvolume_id', 'native_storage_host_group_id': '10', 'native_port_group_id': '0', 'native_volume_group_id': '9', 'storage_id': '12345'}, {'native_masking_view_id': '128host_idvolume_id', 'name': '128host_idvolume_id', 'native_storage_host_group_id': '12', 'native_port_group_id': '0', 'native_volume_group_id': '8', 'storage_id': '12345'}, {'native_masking_view_id': 'host_group_id1RH_196_02volume_id', 'name': 'host_group_id1RH_196_02volume_id', 'native_storage_host_id': 'RH_196_02', 'native_volume_group_id': '1', 'native_port_id': 'CM#0 CA#0 Port#0', 'storage_id': '12345'}, {'native_masking_view_id': 'host_group_id9AIX206_0volume_id', 'name': 'host_group_id9AIX206_0volume_id', 'native_storage_host_id': 'AIX206_0', 'native_volume_group_id': '9', 'native_port_id': 'CM#0 CA#0 Port#0', 'storage_id': '12345'}, {'native_masking_view_id': 'host_group_id5RH197_0volume_id', 'name': 'host_group_id5RH197_0volume_id', 'native_storage_host_id': 'RH197_0', 'native_volume_group_id': '5', 'native_port_id': 'CM#1 CA#0 Port#0', 'storage_id': '12345'}] PARSE_ALERT_DATA = { 'alert_id': '123456', 'severity': 'Fatal', 'category': 'Fault', 'occur_time': 1644827799328, 'description': 'cm0 error', 'location': 'cm0#eterus-213546', 'type': 'EquipmentAlarm', 'resource_type': 'Storage', 'alert_name': 'cm0 error', 'match_key': 'e10adc3949ba59abbe56e057f20f883e'} PORTS_OLD_DATA = [ {'name': 'CM#0 Port#0', 'storage_id': '12345', 'native_port_id': 'CM#0 Port#0', 'location': 'CM#0 Port#0', 'type': 'fc', 'speed': 4000000000, 'connection_status': 'connected', 'wwn': '500000E0D0376706', 'health_status': 'normal'}, {'name': 'CM#0 Port#1', 'storage_id': '12345', 'native_port_id': 'CM#0 Port#1', 'location': 'CM#0 Port#1', 'type': 'fc', 'speed': None, 'connection_status': 'disconnected', 'wwn': '500000E0D0376707', 'health_status': 'normal'}, {'name': 'CM#1 Port#0', 'storage_id': '12345', 'native_port_id': 'CM#1 Port#0', 'location': 'CM#1 Port#0', 'type': 'fc', 'speed': None, 'connection_status': 'disconnected', 'wwn': '500000E0D0376786', 'health_status': 'normal'}, {'name': 'CM#1 Port#1', 'storage_id': '12345', 'native_port_id': 'CM#1 Port#1', 'location': 'CM#1 Port#1', 'type': 'fc', 'speed': 4000000000, 'connection_status': 'connected', 'wwn': '500000E0D0376787', 'health_status': 'normal'}] PORTS_DATA = [{'name': 'CM#0 CA#0 Port#0', 'storage_id': '12345', 'native_port_id': 'CM#0 CA#0 Port#0', 'location': 
'CM#0 CA#0 Port#0', 'type': 'fc', 'speed': 10000000000, 'connection_status': 'unknown', 'wwn': '500000E0DA0A7D20', 'health_status': 'unknown'}, {'name': 'CM#0 CA#0 Port#1', 'storage_id': '12345', 'native_port_id': 'CM#0 CA#0 Port#1', 'location': 'CM#0 CA#0 Port#1', 'type': 'fc', 'speed': 10000000000, 'connection_status': 'unknown', 'wwn': '500000E0DA0A7D21', 'health_status': 'unknown'}, {'name': 'CM#0 CA#1 Port#0', 'storage_id': '12345', 'native_port_id': 'CM#0 CA#1 Port#0', 'location': 'CM#0 CA#1 Port#0', 'type': 'fc', 'speed': 10000000000}, {'name': 'CM#0 CA#1 Port#1', 'storage_id': '12345', 'native_port_id': 'CM#0 CA#1 Port#1', 'location': 'CM#0 CA#1 Port#1', 'type': 'fc', 'speed': 10000000000}] DISKS_OLD = [ {'name': 'CE-Disk#0', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#0', 'serial_number': '6SK2CEG91327', 'manufacturer': 'SEAGATE', 'model': '3.5" SAS', 'firmware': 'GF0D', 'location': 'CE-Disk#0', 'speed': 15000, 'capacity': 483183820800.0, 'status': 'abnormal', 'physical_type': 'sas', 'logical_type': 'unknown'}, {'name': 'CE-Disk#1', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#1', 'serial_number': '6SK262SZ1312', 'manufacturer': 'SEAGATE', 'model': '3.5" SAS', 'firmware': 'GF0D', 'location': 'CE-Disk#1', 'speed': 15000, 'capacity': 483183820800.0, 'status': 'abnormal', 'physical_type': 'sas', 'logical_type': 'unknown'}, {'name': 'CE-Disk#2', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#2', 'serial_number': '6SK26QCA1312', 'manufacturer': 'SEAGATE', 'model': '3.5" SAS', 'firmware': 'GF0D', 'location': 'CE-Disk#2', 'speed': 15000, 'capacity': 483183820800.0, 'status': 'normal', 'physical_type': 'sas', 'logical_type': 'member'}, {'name': 'CE-Disk#3', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#3', 'serial_number': '6SK2DE941330', 'manufacturer': 'SEAGATE', 'model': '3.5" SAS', 'firmware': 'GF0D', 'location': 'CE-Disk#3', 'speed': 15000, 'capacity': 483183820800.0, 'status': 'normal', 'physical_type': 'sas', 'logical_type': 'member'}] DISKS_DATA = [ {'name': 'CE-Disk#0', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#0', 'serial_number': '0QWA91YA', 'manufacturer': 'HGST', 'model': '2.5 SSD-M', 'firmware': 'H603', 'location': 'CE-Disk#0', 'speed': None, 'capacity': 429496729600.0, 'status': 'normal', 'physical_type': 'ssd', 'logical_type': 'member'}, {'name': 'CE-Disk#1', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#1', 'serial_number': '0QWAHN1A', 'manufacturer': 'HGST', 'model': '2.5 SSD-M', 'firmware': 'H603', 'location': 'CE-Disk#1', 'speed': None, 'capacity': 429496729600.0, 'status': 'normal', 'physical_type': 'ssd', 'logical_type': 'member'}, {'name': 'CE-Disk#2', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#2', 'serial_number': '0QWA9GMA', 'manufacturer': 'HGST', 'model': '2.5 SSD-M', 'firmware': 'H603', 'location': 'CE-Disk#2', 'speed': None, 'capacity': 429496729600.0, 'status': 'normal', 'physical_type': 'ssd', 'logical_type': 'member'}, {'name': 'CE-Disk#3', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#3', 'serial_number': '0QWA9KJA', 'manufacturer': 'HGST', 'model': '2.5 SSD-M', 'firmware': 'H603', 'location': 'CE-Disk#3', 'speed': None, 'capacity': 429496729600.0, 'status': 'normal', 'physical_type': 'ssd', 'logical_type': 'member'}, {'name': 'CE-Disk#4', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#4', 'serial_number': '0QWAHMAA', 'manufacturer': 'HGST', 'model': '2.5 SSD-M', 'firmware': 'H603', 'location': 'CE-Disk#4', 'speed': None, 'capacity': 429496729600.0, 'status': 'normal', 'physical_type': 'ssd', 'logical_type': 
'member'}, {'name': 'CE-Disk#5', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#5', 'serial_number': 'S7M1LC92', 'manufacturer': 'SEAGATE', 'model': '2.5 Online', 'firmware': 'VE0C', 'location': 'CE-Disk#5', 'speed': 15000, 'capacity': 644245094400.0, 'status': 'normal', 'physical_type': 'unknown', 'logical_type': 'member'}, {'name': 'CE-Disk#6', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#6', 'serial_number': 'W7M0M8PR', 'manufacturer': 'SEAGATE', 'model': '2.5 Online', 'firmware': 'VE0C', 'location': 'CE-Disk#6', 'speed': 15000, 'capacity': 644245094400.0, 'status': 'normal', 'physical_type': 'unknown', 'logical_type': 'member'}, {'name': 'CE-Disk#7', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#7', 'serial_number': 'S7M1LC99', 'manufacturer': 'SEAGATE', 'model': '2.5 Online', 'firmware': 'VE0C', 'location': 'CE-Disk#7', 'speed': 15000, 'capacity': 644245094400.0, 'status': 'normal', 'physical_type': 'unknown', 'logical_type': 'member'}, {'name': 'CE-Disk#8', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#8', 'serial_number': 'S7M1L3XD', 'manufacturer': 'SEAGATE', 'model': '2.5 Online', 'firmware': 'VE0C', 'location': 'CE-Disk#8', 'speed': 15000, 'capacity': 644245094400.0, 'status': 'normal', 'physical_type': 'unknown', 'logical_type': 'member'}, {'name': 'CE-Disk#9', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#9', 'serial_number': 'S7M1KXS5', 'manufacturer': 'SEAGATE', 'model': '2.5 Online', 'firmware': 'VE0C', 'location': 'CE-Disk#9', 'speed': 15000, 'capacity': 644245094400.0, 'status': 'normal', 'physical_type': 'unknown', 'logical_type': 'member'}, {'name': 'CE-Disk#10', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#10', 'serial_number': 'S7M1KCPD', 'manufacturer': 'SEAGATE', 'model': '2.5 Online', 'firmware': 'VE0C', 'location': 'CE-Disk#10', 'speed': 15000, 'capacity': 644245094400.0, 'status': 'normal', 'physical_type': 'unknown', 'logical_type': 'member'}, {'name': 'CE-Disk#11', 'storage_id': '12345', 'native_disk_id': 'CE-Disk#11', 'serial_number': 'W7M0MYYA', 'manufacturer': 'SEAGATE', 'model': '2.5 Online', 'firmware': 'VE0C', 'location': 'CE-Disk#11', 'speed': 15000, 'capacity': 644245094400.0, 'status': 'normal', 'physical_type': 'unknown', 'logical_type': 'member'}] PARSE_ALERT_INFO = { '1.3.6.1.2.1.1.3.0': '123456', '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.211.4.1.1.126.1.150.0.2', '1.3.6.1.4.1.211.1.21.1.150.7.0': '-213546', '1.3.6.1.4.1.211.1.21.1.150.1.1.0': 'cm0#eterus', '1.3.6.1.4.1.211.1.21.1.150.11.0': 'cm0 error' } def create_driver(): EternusSSHPool.do_exec_shell = mock.Mock( side_effect=["Summary Status [Normal]"]) return EternusDriver(**ACCESS_INFO) class TestEternusDriver(TestCase): driver = create_driver() def test_get_storage(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[STORAGE_NAME_DATA, STORAGE_MODEL_DATA, STORAGE_STATUS_DATA, DISK_LIST_INFO, POOL_DATAS]) storage = self.driver.get_storage(context) self.assertDictEqual(storage, STORAGE_RESULT) def test_list_storage_pools(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock(side_effect=[POOL_DATAS]) pools = self.driver.list_storage_pools(context) self.assertDictEqual(pools[0], POOL_RESULT[0]) def test_list_storage_pools_old(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock(side_effect=[ POOL_ERROR_DATAS, POOL_OLD_DATAS]) pools = self.driver.list_storage_pools(context) 
self.assertDictEqual(pools[0], POOL_old_RESULT[0]) def test_list_volumes(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[VOLUMES, VOLUME_TPV_DATAS, VOLUME_FTV_DATAS]) volumes = self.driver.list_volumes(context) self.assertDictEqual(volumes[0], VOLUME_RESULT[0]) def test_list_volumes_old(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[VOLUMES_ERROR, VOLUMES_ERROR, VOLUMES_ERROR, VOLUME_DATAS]) volumes = self.driver.list_volumes(context) self.assertDictEqual(volumes[0], VOLUME_OLD_RESULT[0]) def test_get_controllers(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[NODE_DATAS, NODE_STATUS_DATAS]) controllers = self.driver.list_controllers(context) self.assertDictEqual(controllers[0], CONTROLLER_RESULT[0]) def test_list_alerts(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[LIST_ALERT_WARNING, LIST_ALERT_ERROR]) list_alerts = self.driver.list_alerts(context) ALERTS_INFO['occur_time'] = list_alerts[0].get('occur_time') ALERTS_INFO['match_key'] = list_alerts[0].get('match_key') self.assertDictEqual(list_alerts[0], ALERTS_INFO) def test_list_alerts_old(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[None, None, LIST_ALERT_WARNING, LIST_ALERT_ERROR]) list_alerts = self.driver.list_alerts(context) ALERTS_INFO['occur_time'] = list_alerts[0].get('occur_time') ALERTS_INFO['match_key'] = list_alerts[0].get('match_key') self.assertDictEqual(list_alerts[0], ALERTS_INFO) def test_list_disks(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock(side_effect=[DISK_LIST_INFO]) data = self.driver.list_disks(context) self.assertEqual(data, DISKS_DATA) def test_list_disks_OLD(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock(side_effect=[DISK_OLD]) data = self.driver.list_disks(context) self.assertListEqual(data, DISKS_OLD) def test_list_ports(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[FCOE_INFO, NODE_DATAS]) data = self.driver.list_ports(context) self.assertListEqual(data, PORTS_DATA) def test_list_ports_old(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[FC_INFO_OLD, NODE_DATAS_OLD]) data = self.driver.list_ports(context) self.assertListEqual(data, PORTS_OLD_DATA) def test_parse_alert(self): parse_alert = self.driver.parse_alert(context, PARSE_ALERT_INFO) PARSE_ALERT_DATA['occur_time'] = parse_alert.get('occur_time') self.assertDictEqual(parse_alert, PARSE_ALERT_DATA) def test_list_storage_host_initiators(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[HOST_STATUS_INFO, FC_HOSTS_INFO, ISCSI_HOST_INFO, ISCSI_HOST_DETAIL_ZERO, ISCSI_HOST_DETAIL_ONE, ISCSI_HOST_DETAIL_TWO, SAS_HOST_INFO]) initiators = self.driver.list_storage_host_initiators(context) self.assertListEqual(initiators, INITIATORS_DATA) def test_list_storage_hosts(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = 
mock.Mock( side_effect=[HOST_STATUS_INFO, FC_HOSTS_INFO, ISCSI_HOST_INFO, ISCSI_HOST_DETAIL_ZERO, ISCSI_HOST_DETAIL_ONE, ISCSI_HOST_DETAIL_TWO, SAS_HOST_INFO]) hosts = self.driver.list_storage_hosts(context) self.assertListEqual(hosts, HOSTS_DATA) def test_list_storage_host_groups(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[HOST_GROUPS_INFO]) host_groups = self.driver.list_storage_host_groups(context) self.assertDictEqual(host_groups, HOST_GROUPS_DATA) def test_list_port_groups(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[PORT_G_VIEW_INFO]) host_groups = self.driver.list_port_groups(context) self.assertDictEqual(host_groups, PORT_G_DATA) def test_list_volume_groups(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[VOLUME_GROUPS_INFO, VOLUME_DETAILS_INFO]) volume_groups = self.driver.list_volume_groups(context) self.assertDictEqual(volume_groups, VOLUME_GROUPS_DATA) def test_list_masking_views(self): EternusSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) EternusSSHPool.do_exec_shell = mock.Mock( side_effect=[MASKING_VIEWS_INFO]) masking_views = self.driver.list_masking_views(context) self.assertListEqual(masking_views, MASKING_VIEWS_DATA) ================================================ FILE: delfin/tests/unit/drivers/hitachi/__init__.py ================================================ ================================================ FILE: delfin/tests/unit/drivers/hitachi/hnas/__init__.py ================================================ ================================================ FILE: delfin/tests/unit/drivers/hitachi/hnas/constants.py ================================================ # Copyright 2021 The SODA Authors. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
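# A note on the fixture pattern shared by these driver unit tests (the FUJITSU
# ETERNUS suite above and the Hitachi HNAS suite below): the SSH pool's
# do_exec_shell method is replaced with mock.Mock(side_effect=[...]), so each
# CLI command the driver issues consumes the next canned console output in
# order. A minimal, self-contained sketch of that mechanism; FakeSSHPool and
# the output strings are illustrative only, not part of delfin:
def _side_effect_sketch():
    from unittest import mock

    class FakeSSHPool(object):
        def do_exec_shell(self, command_list):
            raise NotImplementedError  # replaced by the mock below

    pool = FakeSSHPool()
    pool.do_exec_shell = mock.Mock(
        side_effect=['CLI> show disks ...', 'CLI> show ports ...'])
    first = pool.do_exec_shell(['show disks'])    # -> 'CLI> show disks ...'
    second = pool.do_exec_shell(['show ports'])   # -> 'CLI> show ports ...'
    # A third call would raise StopIteration: the side_effect list is
    # consumed, which is why every test re-mocks with a fresh list.
    return first, second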
ACCESS_INFO = { "storage_id": "12345", "vendor": "hitachi", "model": "hnas", "ssh": { "host": "192.168.3.211", "port": 22, "username": "manager", "password": "manager", } } STORAGE_INFO = """\r cluster-show\r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ cluster-show\r Overall Status = Online\r Cluster Health = Robust\r Cluster Mode = Not clustered\r Cluster Name = pba-hnas-1\r Cluster UUID = a39f815a-e582-11d6-9000-b76f3098a657\r Cluster Size = 1\r Node Name = pba-hnas-1-1\r Node ID = 1\r Cluster GenId = 1\r Cluster Master = No\r \r pba-hnas-1-1:$ """ VERSION_INFO = """\r ver\r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ ver\r \r Model: HNAS 4060\r \r Software: 12.7.4221.12 (built 2016-10-28 21:51:37+01:00)\r \r Hardware: NAS Platform (M4SJKW1423160)\r \r board MMB1\r mmb 12.7.4221.12 release (2016-10-28 21:51:37+01:00)\r \r board MFB2\r mfb2hw MB v0132 WL v0132 TD v0132 FD v0132 TC v00C6 RY v00C6 \r TY v00C6 IC v00C6 WF v007C FS v007C OS v007C WD v007C D0 v0077 \r Serial no B1423125 (Tue Jun 17 13:38:33 2014)\r \r board MCP\r Serial no B1423160 (Wed Jun 18 20:39:53 2014)\r \r pba-hnas-1-1:$ """ LOCATION_INFO = """\r system-information-get\r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ system-information-get\r \r Name: pba-hnas-1\r Location: chengdu\r Contact: \r \r pba-hnas-1-1:$ """ DISK_INFO = """\r sd-list --scsi\r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ sd-list --scsi\r Device ID: 0\r Comment: \r Capacity: 50GiB (53687746560 bytes)\r Status: OK\r Role: Primary\r Access: Allowed\r Used in span: 'span1' (capacity 200GiB)\r Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r Submodel: HM70\r Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:00\r Blocksize: 512\r Superflush: Default\r Lun: 0\r Serial number: 212902\r Site ID: 0\r Tier: 1\r HDS ctrlr port: 0000\r HDS dev name: 1000\r HDP pool no: 0\r GAD: No\r Queue depth: min 16, default 32, max 512, configured [default], effective 32\r \r Device ID: 1\r Comment: \r Capacity: 50GiB (53687746560 bytes)\r Status: OK\r Role: Primary\r Access: Allowed\r Used in span: 'span1' (capacity 200GiB)\r Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r Submodel: HM70\r Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:01\r Blocksize: 512\r Superflush: Default\r Lun: 1\r Serial number: 212902\r Site ID: 0\r Tier: 1\r HDS ctrlr port: 0400\r HDS dev name: 1001\r HDP pool no: 0\r GAD: No\r Queue depth: min 16, default 32, max 512, configured [default], effective 32\r \r Device ID: 2\r Comment: \r Capacity: 50GiB (53687746560 bytes)\r Status: OK\r Role: Primary\r Access: Allowed\r Used in span: 'span1' (capacity 200GiB)\r Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r Submodel: HM70\r Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:02\r Blocksize: 512\r Superflush: Default\r Lun: 2\r Serial number: 212902\r Site ID: 0\r Tier: 1\r HDS ctrlr port: 0000\r HDS dev name: 1002\r HDP pool no: 0\r GAD: No\r Queue depth: min 16, default 32, max 512, configured [default], effective 32\r \r Device ID: 3\r Comment: \r Capacity: 50GiB (53687746560 bytes)\r Status: OK\r Role: Primary\r Access: Allowed\r Used in span: 'span1' (capacity 200GiB)\r Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r Submodel: HM70\r Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:03\r Blocksize: 512\r Superflush: Default\r Lun: 3\r Serial number: 212902\r Site ID: 0\r Tier: 1\r HDS ctrlr port: 0400\r HDS dev name: 1003\r HDP pool no: 
0\r GAD: No\r Queue depth: min 16, default 32, max 512, configured [default], effective 32\r \r Device ID: 4\r Comment: \r Capacity: 50GiB (53687746560 bytes)\r Status: OK\r Role: Primary\r Access: Allowed\r Used in span: 'span2' (capacity 400GiB)\r Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r Submodel: HM70\r Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:04\r Blocksize: 512\r Superflush: Default\r Lun: 4\r Serial number: 212902\r Site ID: 0\r Tier: None\r HDS ctrlr port: 0000\r HDS dev name: 1004\r HDP pool no: 0\r GAD: No\r Queue depth: min 16, default 32, max 512, configured [default], effective 32\r \r Device ID: 5\r Comment: \r Capacity: 50GiB (53687746560 bytes)\r Status: OK\r Role: Primary\r Access: Allowed\r Used in span: 'span2' (capacity 400GiB)\r Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r Submodel: HM70\r Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:05\r Blocksize: 512\r Superflush: Default\r Lun: 5\r Serial number: 212902\r Site ID: 0\r Tier: None\r HDS ctrlr port: 0400\r HDS dev name: 1005\r HDP pool no: 0\r GAD: No\r Queue depth: min 16, default 32, max 512, configured [default], effective 32\r \r Device ID: 6\r Comment: \r Capacity: 50GiB (53687746560 bytes)\r Status: OK\r Role: Primary\r Access: Allowed\r Used in span: 'span2' (capacity 400GiB)\r Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r Submodel: HM70\r Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:06\r Blocksize: 512\r Superflush: Default\r Lun: 6\r Serial number: 212902\r Site ID: 0\r Tier: None\r HDS ctrlr port: 0000\r HDS dev name: 1006\r HDP pool no: 0\r GAD: No\r Queue depth: min 16, default 32, max 512, configured [default], effective 32\r \r Device ID: 7\r Comment: \r Capacity: 50GiB (53687746560 bytes)\r Status: OK\r Role: Primary\r Access: Allowed\r Used in span: 'span2' (capacity 400GiB)\r Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r Submodel: HM70\r Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:07\r Blocksize: 512\r Superflush: Default\r Lun: 7\r Serial number: 212902\r Site ID: 0\r Tier: None\r HDS ctrlr port: 0400\r HDS dev name: 1007\r HDP pool no: 0\r GAD: No\r Queue depth: min 16, default 32, max 512, configured [default], effective 32\r \r Device ID: 8\r Comment: \r Capacity: 50GiB (53687746560 bytes)\r Status: OK\r Role: Primary\r Access: Allowed\r Used in span: 'span2' (capacity 400GiB)\r Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r Submodel: HM70\r Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:08\r Blocksize: 512\r Superflush: Default\r Lun: 8\r Serial number: 212902\r Site ID: 0\r Tier: None\r HDS ctrlr port: 0400\r HDS dev name: 1008\r HDP pool no: 0\r GAD: No\r Queue depth: min 16, default 32, max 512, configured [default], effective 32\r \r Device ID: 9\r Comment: \r Capacity: 50GiB (53687746560 bytes)\r Status: OK\r Role: Primary\r Access: Allowed\r Used in span: 'span2' (capacity 400GiB)\r Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r Submodel: HM70\r Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:09\r Blocksize: 512\r Superflush: Default\r Lun: 9\r Serial number: 212902\r Site ID: 0\r Tier: None\r HDS ctrlr port: 0000\r HDS dev name: 1009\r HDP pool no: 0\r GAD: No\r Queue depth: min 16, default 32, max 512, configured [default], effective 32\r \r Device ID: 10\r Comment: \r Capacity: 50GiB (53687746560 bytes)\r Status: OK\r Role: Primary\r Access: Allowed\r Used in span: 'span2' (capacity 400GiB)\r Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r 
Submodel: HM70\r Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:0A\r Blocksize: 512\r Superflush: Default\r Lun: 10\r Serial number: 212902\r Site ID: 0\r Tier: None\r HDS ctrlr port: 0400\r HDS dev name: 100A\r HDP pool no: 0\r GAD: No\r Queue depth: min 16, default 32, max 512, configured [default], effective 32\r \r Device ID: 11\r Comment: \r Capacity: 50GiB (53687746560 bytes)\r Status: OK\r Role: Primary\r Access: Allowed\r Used in span: 'span2' (capacity 400GiB)\r Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r Submodel: HM70\r Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:0B\r Blocksize: 512\r Superflush: Default\r Lun: 11\r Serial number: 212902\r Site ID: 0\r Tier: None\r HDS ctrlr port: 0000\r HDS dev name: 100B\r HDP pool no: 0\r GAD: No\r Queue depth: min 16, default 32, max 512, configured [default], effective 32\r \r pba-hnas-1-1:$ """ POOL_INFO = """\r span-list\r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ span-list\r Span instance name OK? Free Cap/GiB System drives Con\r --------------------- --- ---- ------- ------------------------- ---\r span1 Yes 100% 200 0,1,2,3 90%\r Tier 0: empty: file systems can't be created or mounted\r Tier 1: capacity 200GiB; free: 200GiB (100%); HDP pool free 996GiB\r span2 Yes 86% 400 4,5,6,7;8,9,10,11 90%\r pba-hnas-1-1:$ """ POOL_DETAIL_INFO = """\r \r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ span-space-distribution\r Span span2:\r \r How each stripeset is used:\r Stripeset 0:\r 18GiB 9.09% fs1\r 18GiB 9.09% fs2\r 18GiB 9.09% fs3\r 145GiB 72.74% [Free space]\r Stripeset 1:\r 200GiB 100.00% [Free space]\r \r Where each filesystem resides:\r Filesystem fs1:\r Stripeset 0 18GiB 100.00%\r Filesystem fs2:\r Stripeset 0 18GiB 100.00%\r Filesystem fs3:\r Stripeset 0 18GiB 100.00%\r \r Span span1:\r \r How each stripeset is used:\r Stripeset 0:\r 200GiB 100.00% [Free space]\r \r Where each filesystem resides:\r \r pba-hnas-1-1:$""" ALERT_INFO = """\r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ event-log-show -w -s\r ****** Current time : 2021-10-25 11:12:35+08:00 ******\r 8208 Information 2021-11-02 08:26:01+08:00 Chassis device 'md0' is running background media scan.\r CAUSE: Chassis drive volume is running a media check.\r RESOLUTION: No Action required.\r \r 8462 Warning 2021-11-02 08:00:10+08:00 [ pba-hnas-1 ] The SMU does not have an email alert profile relating to a managed server.\r CAUSE: An email alert profile relating to a managed server must be applied to the SMU so that alert and diagnostic emails can be sent to the required recipients.\r RESOLUTION: Go to an SMTP Email Profile page and apply a profile to the SMU.\r \r 8208 Information 2021-11-02 04:04:01+08:00 Chassis device 'md2' is running background media scan.\r CAUSE: Chassis drive volume is running a media check.\r RESOLUTION: No Action required.\r \r 8209 Information 2021-11-02 04:04:00+08:00 Chassis device 'md3' has completed background media scan.\r CAUSE: Chassis drive volume media check has completed.\r RESOLUTION: No Action required.\r \r 9995 Information 2021-11-01 20:50:36+08:00 wq test snmp.\r CAUSE: A test event was requested.\r RESOLUTION: No action required.\r \r\ 3303 Information 2021-11-01 19:27:22+08:00 Exceeded socket backlog: dropping additional connection request from 127.0.0.1:34008->127.0.0.1:206: this event, Id 3303, happened once in the last 6.25 d on the MMB1.\r CAUSE: Socket backlogged: could not allow a new connection.\r RESOLUTION: This 
is expected behavior on receiving a flurry of connection requests. If it happens in other circumstances, run the Performance Info Report, then report this and send the PIR results to your support provider.\r \r 8208 Information 2021-11-01 16:44:01+08:00 Chassis device 'md3' is running background media scan.\r CAUSE: Chassis drive volume is running a media check.\r RESOLUTION: No Action required.\r \r 8462 Warning 2021-11-01 08:00:10+08:00 [ pba-hnas-1 ] The SMU does not have an email alert profile relating to a managed server.\r CAUSE: An email alert profile relating to a managed server must be applied to the SMU so that alert and diagnostic emails can be sent to the required recipients.\r RESOLUTION: Go to an SMTP Email Profile page and apply a profile to the SMU.\r ****** Current time : 2021-10-25 11:12:35+08:00 ******\r pba-hnas-1-1:$ """ TRAP_INFO = { '1.3.6.1.4.1.11096.6.1.1': "8462 Warning: [ pba-hnas-1 ] The SMU does not have an email alert " "profile relating to a managed server." } NODE_INFO = """Linux pba-hnas-1 2.6.32-5-amd64 #1 SMP Sun Dec 21 18: 01:12 UTC 2014 x86_64\r \r \r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ cluster-show -y\r Ethernet Mgmnt\r ID Node Name Status FS Access Aggs Netwrk FC EVS IDs\r -- --------------- -------- ---------- ---------- ------ --- -------\r 1 pba-hnas-1-1 ONLINE OK Degraded OK OK [0,1,2]\r pba-hnas-1-1:$ """ FC_PORT_INFO = """\r fc-hports\r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ fc-hports\r \r Host Port 1\r Addrs: 0x1\r Port name: 50:03:01:70:00:06:8B:01\r Node name: 50:03:01:70:00:06:8B:00 \r FC Link is up\r Status : Good \r \r Host Port 2\r Addrs: not assigned\r Port name: 50:03:01:70:00:06:8B:02\r Node name: 50:03:01:70:00:06:8B:00 \r FC Link is down\r \r Host Port 3\r Addrs: 0x1\r Port name: 50:03:01:70:00:06:8B:03\r Node name: 50:03:01:70:00:06:8B:00 \r FC Link is up\r Status : Good \r \r Host Port 4\r Addrs: not assigned\r Port name: 50:03:01:70:00:06:8B:04\r Node name: 50:03:01:70:00:06:8B:00 \r FC Link is down\r \r pba-hnas-1-1:$ """ FC_PORT_STATUS = """\r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ fc-link-speed\r FC 1: 8 Gbps\r FC 2: 4 Gbps\r FC 3: 8 Gbps\r FC 4: 8 Gbps\r pba-hnas-1-1:$ """ ETH_PORT_INFO = """\r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ ifconfig\r ag1 Link encap:1 HWaddr 00-30-17-09-fc-08\r inet addr:192.168.0.1 Bcast:192.168.0.255 mask:255.255.255.0\r inet addr:192.168.0.2 Bcast:192.168.0.255 mask:255.255.255.0\r Link:DOWN Admin:UP MTU:1500 Metric:1 txqueuelen:64\r \r ag2 Link encap:1 HWaddr 00-30-17-09-fc-09\r Link:DOWN Admin:DOWN MTU:1500 Metric:1 txqueuelen:64\r \r c1 Link encap:1 HWaddr 00-30-17-09-fc-10\r inet addr:240.152.166.87 Bcast:240.255.255.255 mask:255.0.0.0\r Link:DOWN Admin:UP MTU:1488 Metric:2 txqueuelen:64\r \r c2 Link encap:1 HWaddr 00-30-17-09-fc-11\r Link:DOWN Admin:DOWN MTU:1488 Metric:2 txqueuelen:64\r \r eth0 Link encap:1 HWaddr 0c-c4-7a-05-9e-a0\r inet addr:192.168.3.211 Bcast:192.168.3.255 mask:255.255.255.0\r inet6 addr: fe80::ec4:7aff:fe05:9ea0/64 Scope:Link\r Link:UP Admin:UP MTU:1500 Metric:4 txqueuelen:64\r \r eth1 Link encap:1 HWaddr 0c-c4-7a-05-9e-a1\r inet addr:192.0.2.2 Bcast:192.0.255.255 mask:255.255.0.0\r inet addr:192.0.2.200 Bcast:192.0.255.255 mask:255.255.0.0\r Link:DOWN Admin:UP MTU:1500 Metric:4 txqueuelen:64\r \r lo Link encap:1 \r inet addr:127.0.0.1 Bcast:127.255.255.255 mask:255.0.0.0\r inet6 addr: ::1/128 Scope:Global\r inet6 addr: fe80::200:ff:fe00:0/64 
Scope:Link\r Link:UP Admin:UP MTU:1500 Metric:4 txqueuelen:64\r \r pba-hnas-1-1:$ """ FS_INFO = """\r filesystem-list\r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ filesystem-list\r Instance name Dev On span State EVS Cap/GiB Confined Flag\r ----------------- ---- ----------- ----- --- ------- -------- ----\r fs1 1024 span2 Mount 1 18 20 \r pba-hnas-1-1:$ """ QTREE_INFO = """\r evs-select 1\r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ evs-select 1\r pba-hnas-1-1[EVS1]:$ virtual-volume list --verbose fs1\r tree1\r email : \r root : /12323\r tag : 2\r usage bytes : 0 B files: 1\r last modified: 2021-09-23 07:18:14.714807865+00:00\r vol2\r email : \r root : /123\r tag : 1\r usage bytes : 0 B files: 1\r last modified: 2021-09-15 07:17:02.790323869+00:00\r pba-hnas-1-1[EVS1]:$ """ CIFS_SHARE_INFO = """\r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ evs-select 1\r pba-hnas-1-1[EVS1]:$ cifs-share list\r \r Share name: tree1\r Share path: \12323\r Share users: 0\r Share online: Yes\r Share comment: Share associated with Virtual Volume tree1\r Cache options: Manual local caching for documents\r ABE enabled: No\r Continuous Availability: No\r Access snapshots: Yes\r Display snapshots: Yes\r ShadowCopy enabled: Yes\r Lower case on create: No\r Follow symlinks: Yes\r Follow global symlinks: No\r Scan for viruses: Yes\r File system label: fs1\r File system size: 18 GB\r File system free space: 15.6 GB\r File system state: \r formatted = Yes\r mounted = Yes\r failed = No\r thin provisioned = No\r Disaster recovery setting:\r Recovered = No\r Transfer setting = Use file system default\r Home directories: Off\r Mount point options:\r \r Share name: C$\r Share path: \\r Share users: 0\r Share online: Yes\r Share comment: Default share\r Cache options: Manual local caching for documents\r ABE enabled: No\r Continuous Availability: No\r Access snapshots: Yes\r Display snapshots: No\r ShadowCopy enabled: Yes\r Lower case on create: No\r Follow symlinks: Yes\r Follow global symlinks: No\r Scan for viruses: Yes\r File system info: *** not available ***\r Disaster recovery setting:\r Recovered = No\r Transfer setting = Use file system default\r Home directories: Off\r Mount point options:\r \r \r Share name: vol6\r Share path: \666\r Share users: 0\r Share online: No\r Share comment: Share associated with Virtual Volume vol6\r Cache options: Manual local caching for documents\r ABE enabled: No\r Continuous Availability: No\r Access snapshots: Yes\r Display snapshots: Yes\r ShadowCopy enabled: Yes\r Lower case on create: No\r Follow symlinks: Yes\r Follow global symlinks: No\r Scan for viruses: Yes\r File system info: *** not available ***\r Disaster recovery setting:\r Recovered = No\r Transfer setting = Use file system default\r Home directories: Off\r Mount point options:\r \r pba-hnas-1-1[EVS1]:$ """ NFS_SHARE_INFO = """\r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ evs-select 1\r pba-hnas-1-1[EVS1]:$ nfs-export list\r \r Export name: /nfs1\r Export path: /\r File system label: fs1\r File system size: 18 GB\r File system free space: 15.6 GB\r File system state: \r formatted = Yes\r mounted = Yes\r failed = No\r thin provisioned = No\r Access snapshots: Yes\r Display snapshots: Yes\r Read Caching: Disabled\r Disaster recovery setting:\r Recovered = No\r Transfer setting = Use file system default\r \r Export configuration:\r 192.168.3.163\r \r \r Export name: /vol6\r Export path: /666\r File system info: 
*** not available *** \r Access snapshots: Yes\r Display snapshots: Yes\r Read Caching: Disabled\r Disaster recovery setting:\r Recovered = No\r Transfer setting = Use file system default\r \r Export configuration:\r \r \r \r Export name: /vol2\r Export path: /123\r File system label: fs1\r File system size: 18 GB\r File system free space: 15.6 GB\r File system state: \r formatted = Yes\r mounted = Yes\r failed = No\r thin provisioned = No\r Access snapshots: Yes\r Display snapshots: Yes\r Read Caching: Disabled\r Disaster recovery setting:\r Recovered = No\r Transfer setting = Use file system default\r \r Export configuration:\r \r \r pba-hnas-1-1[EVS1]:$ """ FS_DETAIL_INFO = """\r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ df -k\r \r ID Label EVS Size Used Snapshots """\ + """ Deduped Avail Thin FS Type \r ---- ----- --- ----------- ---------------- --------- """\ + """ ------- ----------------- ---- ----- \r 1024 fs1 1 18874368 KB 2520544 KB (13%) 0 KB (0%) """\ + """ NA 16353824 KB (87%) No 32 KB,WFS-2,128 DSBs \r \r pba-hnas-1-1:$ """ QUOTA_INFO = """\r \r HDS NAS OS Console\r MAC ID : B7-6F-30-98-A6-57\r \r pba-hnas-1-1:$ evs-select 1\r pba-hnas-1-1[EVS1]:$ quota list fs1\r Type : Explicit\r Target : Group: root\r Usage : 10 GB\r Limit : 1 GB (Soft)\r Warning : 75% (768 MB)\r Critical : 85% (870.4 MB)\r Reset : 5% (51.2 MB)\r File Count : 7\r Limit : 213 (Soft)\r Warning : 75% (159)\r Critical : 85% (181)\r Reset : 5% (10)\r Generate Events : Disabled\r \r Type : Explicit\r Target : User: root\r Usage : 10 GB\r Limit : 1 GB (Soft)\r Warning : 75% (768 MB)\r Critical : 85% (870.4 MB)\r Reset : 5% (51.2 MB)\r File Count : 7\r Limit : 213 (Soft)\r Warning : 75% (159)\r Critical : 85% (181)\r Reset : 5% (10)\r Generate Events : Disabled\r \r Type : Explicit\r Target : ViVol: vol2\r Usage : 0 B\r Limit : 1 GB (Soft)\r Warning : 75% (768 MB)\r Critical : 85% (870.4 MB)\r Reset : 5% (51.2 MB)\r File Count : 1\r Limit : 213 (Soft)\r Warning : 75% (159)\r Critical : 85% (181)\r Reset : 5% (10)\r Generate Events : Disabled\r \r pba-hnas-1-1[EVS1]:$""" ================================================ FILE: delfin/tests/unit/drivers/hitachi/hnas/test_hnas.py ================================================ # Copyright 2021 The SODA Authors. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
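# The HNAS fixtures above keep the literal '\r' characters that a paramiko
# channel yields for console output. Before any field can be extracted, that
# raw text has to be normalized into clean lines. A hedged sketch of such a
# normalization step (parse_console_lines is illustrative, not the actual
# delfin helper):
def parse_console_lines(raw_output):
    """Split raw CLI output on CR/LF and drop blanks and bare prompts."""
    lines = []
    normalized = raw_output.replace('\r\n', '\n').replace('\r', '\n')
    for line in normalized.split('\n'):
        line = line.strip()
        if not line or line.endswith(':$'):
            continue  # blank line or an empty shell-prompt echo
        lines.append(line)
    return lines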
from unittest import TestCase, mock

import paramiko

from delfin.tests.unit.drivers.hitachi.hnas import constants
from delfin import context
from delfin.drivers.hitachi.hnas.hds_nas import HitachiHNasDriver
from delfin.drivers.utils.ssh_client import SSHPool


class TestHitachiHNasDriver(TestCase):
    SSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})
    SSHPool.do_exec_shell = mock.Mock(
        side_effect=[constants.NODE_INFO])
    hnas_client = HitachiHNasDriver(**constants.ACCESS_INFO)

    @mock.patch.object(HitachiHNasDriver, 'reset_connection')
    def test_reset_connection(self, reset_connection):
        SSHPool.do_exec_shell = mock.Mock(
            side_effect=[constants.NODE_INFO, constants.NODE_INFO])
        kwargs = constants.ACCESS_INFO
        hnas_client = HitachiHNasDriver(**kwargs)
        hnas_client.reset_connection(context, **kwargs)
        self.assertEqual(reset_connection.call_count, 1)
        self.assertEqual(hnas_client.nas_handler.ssh_pool.ssh_host,
                         "192.168.3.211")
        self.assertEqual(hnas_client.nas_handler.ssh_pool.ssh_port, 22)

    def test_get_storage(self):
        SSHPool.do_exec_shell = mock.Mock(
            side_effect=[constants.STORAGE_INFO, constants.VERSION_INFO,
                         constants.LOCATION_INFO, constants.DISK_INFO,
                         constants.POOL_INFO, constants.POOL_DETAIL_INFO])
        data = self.hnas_client.get_storage(context)
        self.assertEqual(data['vendor'], 'Hitachi')

    def test_list_storage_pools(self):
        SSHPool.do_exec_shell = mock.Mock(
            side_effect=[constants.POOL_INFO, constants.POOL_DETAIL_INFO])
        data = self.hnas_client.list_storage_pools(context)
        self.assertEqual(data[0]['name'], 'span1')

    def test_list_alerts(self):
        SSHPool.do_exec_shell = mock.Mock(
            side_effect=[constants.ALERT_INFO])
        data = self.hnas_client.list_alerts(context)
        self.assertEqual(data[0]['alert_name'], '8208')

    def test_parse_alert(self):
        data = self.hnas_client.parse_alert(context, constants.TRAP_INFO)
        self.assertEqual(data['alert_name'], '8462')

    def test_list_controllers(self):
        SSHPool.do_exec_shell = mock.Mock(
            side_effect=[constants.NODE_INFO])
        data = self.hnas_client.list_controllers(context)
        self.assertEqual(data[0]['name'], 'pba-hnas-1-1')

    def test_list_ports(self):
        SSHPool.do_exec_shell = mock.Mock(
            side_effect=[constants.FC_PORT_INFO, constants.FC_PORT_STATUS,
                         constants.ETH_PORT_INFO])
        data = self.hnas_client.list_ports(context)
        self.assertEqual(data[0]['name'], 'FC1')

    def test_list_disks(self):
        SSHPool.do_exec_shell = mock.Mock(
            side_effect=[constants.DISK_INFO])
        data = self.hnas_client.list_disks(context)
        self.assertEqual(data[0]['name'], '1000')

    def test_list_qtrees(self):
        SSHPool.do_exec_shell = mock.Mock(side_effect=[
            constants.FS_INFO, constants.QTREE_INFO])
        data = self.hnas_client.list_qtrees(context)
        self.assertEqual(data[0]['name'], 'tree1')

    def test_list_shares(self):
        SSHPool.do_exec_shell = mock.Mock(
            side_effect=[constants.FS_INFO, constants.CIFS_SHARE_INFO,
                         constants.NFS_SHARE_INFO, constants.QTREE_INFO])
        data = self.hnas_client.list_shares(context)
        self.assertEqual(data[0]['name'], 'tree1')

    def test_list_filesystems(self):
        SSHPool.do_exec_shell = mock.Mock(
            side_effect=[constants.FS_DETAIL_INFO, constants.FS_INFO])
        data = self.hnas_client.list_filesystems(context)
        self.assertEqual(data[0]['name'], 'fs1')

    def test_list_quotas(self):
        SSHPool.do_exec_shell = mock.Mock(
            side_effect=[constants.FS_INFO, constants.QUOTA_INFO])
        data = self.hnas_client.list_quotas(context)
        self.assertEqual(data[0]['file_soft_limit'], '213')

================================================
FILE: delfin/tests/unit/drivers/hitachi/vsp/__init__.py
================================================
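# The VSP tests that follow exercise a REST driver rather than an SSH CLI:
# constructing HitachiVspDriver performs a token login, so the fixture stubs
# requests.Session.post with a canned response before instantiation. A minimal
# sketch of that stubbing technique; the URL and token values here are
# illustrative only:
from unittest import mock

from requests import Session


def _rest_login_sketch():
    fake_resp = mock.MagicMock(status_code=200)
    fake_resp.json.return_value = {'token': 'abc123', 'sessionId': 9}
    with mock.patch.object(Session, 'post', return_value=fake_resp):
        # No network I/O happens: the patched post returns fake_resp.
        resp = Session().post('https://array/api/v1/sessions')
        return resp.json()['token']  # 'abc123'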
================================================ FILE: delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from unittest import TestCase, mock sys.modules['delfin.cryptor'] = mock.Mock() from requests import Session from delfin import context from delfin.drivers.hitachi.vsp.rest_handler import RestHandler from delfin.drivers.hitachi.vsp.vsp_stor import HitachiVspDriver class Request: def __init__(self): self.environ = {'delfin.context': context.RequestContext()} pass ACCESS_INFO = { "storage_id": "12345", "rest": { "host": "51.10.192.90", "port": "8443", "username": "username", "password": "cGFzc3dvcmQ=" }, "ssh": { "host": "110.143.132.231", "port": "22", "username": "username", "password": "password", "host_key": "weqewrerwerwerwe" }, "vendor": "hitachi", "model": "vsp", "extra_attributes": { "array_id": "00112233" } } GET_DEVICE_ID = { "data": [ { "storageDeviceId": "800000011633", "model": "VSP F1500", "serialNumber": 11633, "svpIp": "51.10.192.90", } ] } GET_ALL_POOLS = { "data": [ { "poolId": 0, "poolStatus": "POLN", "usedCapacityRate": 56, "snapshotCount": 0, "poolName": "p3-1", "availableVolumeCapacity": 7796586, "totalPoolCapacity": 17821524, "numOfLdevs": 8, "firstLdevId": 4, "warningThreshold": 70, "depletionThreshold": 80, "virtualVolumeCapacityRate": -1, "isMainframe": False, "isShrinking": False, "locatedVolumeCount": 65, "totalLocatedCapacity": 15694896, "blockingMode": "NB", "totalReservedCapacity": 0, "reservedVolumeCount": 0, "poolType": "HDP", "duplicationNumber": 0, "dataReductionAccelerateCompCapacity": 0, "dataReductionCapacity": 0, "dataReductionBeforeCapacity": 0, "dataReductionAccelerateCompRate": 0, "duplicationRate": 0, "compressionRate": 0, "dataReductionRate": 0, "snapshotUsedCapacity": 0, "suspendSnapshot": True } ] } GET_SPECIFIC_STORAGE = { "storageDeviceId": "800000011633", "model": "VSP G350", "serialNumber": 11633, "svpIp": "51.10.192.90", "rmiPort": 1099, "dkcMicroVersion": "80-06-70/00", "communicationModes": [ { "communicationMode": "lanConnectionMode" } ], "isSecure": False } GET_ALL_VOLUMES = { "data": [ { "ldevId": 0, "clprId": 0, "emulationType": "OPEN-V", "byteFormatCapacity": "2.57 T", "blockCapacity": 5538459648, "composingPoolId": 1, "attributes": [ "POOL" ], "raidLevel": "RAID5", "raidType": "3D+1P", "numOfParityGroups": 1, "parityGroupIds": [ "5-1" ], "driveType": "SLB5E-M1R9SS", "driveByteFormatCapacity": "1.74 T", "driveBlockCapacity": 3750000030, "status": "NML", "mpBladeId": 1, "ssid": "0004", "resourceGroupId": 0, "isAluaEnabled": False } ] } GET_ALL_DISKS = { "data": [ { "driveLocationId": "0-0", "driveTypeName": "SSD(FMC)", "driveSpeed": 10000, "totalCapacity": 600, "driveType": "DKR5D-J600SS", "usageType": "DATA", "status": "NML", "parityGroupId": "1-6", "serialNumber": "123456789012345678901" }, { "driveLocationId": "0-1", "driveTypeName": "SAS", "driveSpeed": 10000, "totalCapacity": 600, 
"driveType": "DKR5D-J600SS", "usageType": "DATA", "status": "NML", "parityGroupId": "1-6", "serialNumber": "123456789012345678902" }, { "driveLocationId": "0-2", "driveTypeName": "SAS", "driveSpeed": 10000, "totalCapacity": 600, "driveType": "DKR5D-J600SS", "usageType": "DATA", "status": "NML", "parityGroupId": "1-6", "serialNumber": "123456789012345678903" }, { "driveLocationId": "0-3", "driveTypeName": "SAS", "driveSpeed": 10000, "totalCapacity": 600, "driveType": "DKR5D-J600SS", "usageType": "DATA", "status": "NML", "parityGroupId": "1-6", "serialNumber": "123456789012345678904" } ] } GET_ALL_CONTROLLERS = { "system": { "powerConsumption": 283 }, "ctls": [ { "location": "CTL1", "status": "Normal", "temperature": 29, "temperatureStatus": "Normal", "type": "Controller Board" }, { "location": "CTL2", "status": "Normal", "temperature": 29, "temperatureStatus": "Normal", "charge": 100, "type": "Controller Board" } ] } TRAP_INFO = { "1.3.6.1.2.1.1.3.0": "0", '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.116.3.11.4.1.1.0.1', '1.3.6.1.4.1.116.5.11.4.2.3': 'eeeeeeeee', '1.3.6.1.4.1.116.5.11.4.2.7': 'ddddddd', '1.3.6.1.4.1.116.5.11.4.2.6': '14:10:10', '1.3.6.1.4.1.116.5.11.4.2.5': '2020/11/20', '1.3.6.1.4.1.116.5.11.4.2.2': ' System Version = 7.4.0.11 ', '1.3.6.1.4.1.116.5.11.4.2.4': '# FRU = None ' } ALERT_INFO = [ { 'location': "test", 'alertId': '223232', 'alertIndex': '1111111', 'errorDetail': 'test alert', 'errorSection': 'someting wrong', 'occurenceTime': '2020-11-20T10:10:10', 'errorLevel': 'Serious' } ] storage_result = { 'name': 'VSP F1500_51.10.192.90', 'vendor': 'Hitachi', 'description': 'Hitachi VSP Storage', 'model': 'VSP F1500', 'status': 'normal', 'serial_number': '11633', 'firmware_version': '80-06-70/00', 'location': '', 'raw_capacity': 18687222349824, 'total_capacity': 18687222349824, 'used_capacity': 10511909388288, 'free_capacity': 8175312961536 } volume_result = [ { 'name': '00:00:00', 'storage_id': '12345', 'description': 'Hitachi VSP volume', 'status': 'normal', 'native_volume_id': '00:00:00', 'native_storage_pool_id': None, 'type': 'thick', 'total_capacity': 2835691339776, 'used_capacity': 2835691339776, 'free_capacity': 0, 'compressed': False, 'deduplicated': False, } ] pool_result = [ { 'name': 'p3-1', 'storage_id': '12345', 'native_storage_pool_id': '0', 'description': 'Hitachi VSP Pool', 'status': 'normal', 'storage_type': 'block', 'total_capacity': 18687222349824, 'used_capacity': 10511909388288, 'free_capacity': 8175312961536, } ] alert_result = [ { 'location': 'test', 'alert_id': '223232', 'sequence_number': '1111111', 'description': 'test alert', 'alert_name': 'someting wrong', 'resource_type': 'Storage', 'occur_time': 1605838210000, 'category': 'Fault', 'type': 'EquipmentAlarm', 'severity': 'Major', } ] trap_alert_result = { 'alert_id': 'eeeeeeeee', 'alert_name': 'ddddddd', 'severity': 'Critical', 'category': 'Fault', 'type': 'EquipmentAlarm', 'occur_time': 1605852610000, 'description': 'ddddddd', 'resource_type': 'Storage', 'location': ' System Version = 7.4.0.11 ', 'match_key': '338d811d532553557ca33be45b6bde55' } controller_result = [ { 'name': 'CTL1', 'storage_id': '12345', 'native_controller_id': 'CTL1', 'status': 'normal', 'location': 'CTL1' }, { 'name': 'CTL2', 'storage_id': '12345', 'native_controller_id': 'CTL2', 'status': 'normal', 'location': 'CTL2' } ] disk_result = [ { 'name': '0-0', 'storage_id': '12345', 'native_disk_id': '0-0', 'serial_number': '123456789012345678901', 'speed': 10000, 'capacity': 644245094400, 'status': 'normal', 'physical_type': 'ssd', 
'logical_type': 'member', 'native_disk_group_id': '1-6', 'location': '0-0' }, { 'name': '0-1', 'storage_id': '12345', 'native_disk_id': '0-1', 'serial_number': '123456789012345678902', 'speed': 10000, 'capacity': 644245094400, 'status': 'normal', 'physical_type': 'sas', 'logical_type': 'member', 'native_disk_group_id': '1-6', 'location': '0-1' }, { 'name': '0-2', 'storage_id': '12345', 'native_disk_id': '0-2', 'serial_number': '123456789012345678903', 'speed': 10000, 'capacity': 644245094400, 'status': 'normal', 'physical_type': 'sas', 'logical_type': 'member', 'native_disk_group_id': '1-6', 'location': '0-2' }, { 'name': '0-3', 'storage_id': '12345', 'native_disk_id': '0-3', 'serial_number': '123456789012345678904', 'speed': 10000, 'capacity': 644245094400, 'status': 'normal', 'physical_type': 'sas', 'logical_type': 'member', 'native_disk_group_id': '1-6', 'location': '0-3' } ] GET_ALL_PORTS = { 'data': [ { 'portId': 'CL1-A', 'portType': 'FIBRE', 'portSpeed': 'AUT', 'loopId': 'EF', 'fabricMode': True, 'portConnection': 'PtoP', 'lunSecuritySetting': True, 'wwn': '50060e80124e3b00' }, { 'portId': 'CL1-B', 'portType': 'ISCSI', 'portSpeed': '10G', 'loopId': '00', 'fabricMode': False, 'lunSecuritySetting': True }] } GET_DETAIL_PORT = { 'portId': 'CL1-B', 'portType': 'ISCSI', 'portSpeed': '10G', 'loopId': '00', 'fabricMode': False, 'lunSecuritySetting': True, 'tcpMtu': 1500, 'iscsiWindowSize': '64KB', 'keepAliveTimer': 60, 'tcpPort': '3260', 'ipv4Address': '192.168.116.19', 'ipv4Subnetmask': '255.255.0.0', 'ipv4GatewayAddress': '0.0.0.0', 'ipv6LinkLocalAddress': { 'status': 'INV', 'addressingMode': 'AM', 'address': 'fe80::' }, 'ipv6GlobalAddress': { 'status': 'INV', 'addressingMode': 'AM', 'address': '::' }, 'ipv6GatewayGlobalAddress': { 'status': 'INV', 'address': '::', 'currentAddress': '::' } } port_result = [ { 'name': 'CL1-A', 'storage_id': '12345', 'native_port_id': 'CL1-A', 'location': 'CL1-A', 'connection_status': 'connected', 'health_status': 'normal', 'type': 'fc', 'logical_type': '', 'max_speed': 8589934592, 'mac_address': None, 'wwn': '50060E80124E3B00', 'ipv4': None, 'ipv4_mask': None, 'ipv6': None }, { 'name': 'CL1-B', 'storage_id': '12345', 'native_port_id': 'CL1-B', 'location': 'CL1-B', 'connection_status': 'connected', 'health_status': 'normal', 'type': 'eth', 'logical_type': '', 'max_speed': 10737418240, 'mac_address': None, 'wwn': None, 'ipv4': '192.168.116.19', 'ipv4_mask': '255.255.0.0', 'ipv6': None }] GET_ALL_GROUPS = { "data": [ { "hostGroupId": "CL1-A,0", "portId": "CL1-A", "hostGroupNumber": 0, "hostGroupName": "1A-G00", "hostMode": "LINUX/IRIX" } ] } GET_SINGLE_WWN_GROUP = { "data": [ { "hostGroupId": "CL1-A,0", "portId": "CL1-A", "hostGroupNumber": 0, "hostGroupName": "1A-G00", "hostMode": "LINUX/IRIX" } ] } GET_SINGLE_ISCSI_GROUP = { "data": [ { "hostGroupId": "CL1-A,0", "portId": "CL1-A", "hostGroupNumber": 0, "hostGroupName": "1A-G00", "hostMode": "LINUX/IRIX", "iscsiName": "iqn.ewdhehdhdhh" } ] } GET_HOST_WWN = { "data": [ { "hostWwnId": "CL1-A,0,21000024ff8f5296", "portId": "CL1-A", "hostGroupNumber": 0, "hostGroupName": "1A-G00", "hostWwn": "21000024ff8f5296", "wwnNickname": "-" } ] } GET_HOST_ISCSI = { "data": [ { "hostIscsiId": "CL1-A,0,iqn.ewdhehdhdhh", "portId": "CL1-A", "hostGroupNumber": 0, "hostGroupName": "3C-G00", "iscsiName": "iqn.ewdhehdhdhh", "iscsiNickname": "test_tjy" } ] } GET_LUN_PATH = { "data": [ { "lunId": "CL1-A,1,1", "portId": "CL1-A", "hostGroupNumber": 0, "hostMode": "LINUX/IRIX", "lun": 1, "ldevId": 1 } ] } initator_result = [ { 'name': 
def create_driver():
    kwargs = ACCESS_INFO
    RestHandler.get_system_info = mock.Mock(return_value=GET_DEVICE_ID)
    m = mock.MagicMock(status_code=200)
    with mock.patch.object(Session, 'post', return_value=m):
        m.raise_for_status.return_value = 201
        m.json.return_value = {
            "token": "97c13b8082444b36bc2103026205fa64",
            "sessionId": 9
        }
        return HitachiVspDriver(**kwargs)


class TestHitachiVspStorStorageDriver(TestCase):
    driver = create_driver()

    def test_initrest(self):
        m = mock.MagicMock(status_code=200)
        with mock.patch.object(Session, 'get', return_value=m):
            m.raise_for_status.return_value = 200
            m.json.return_value = GET_DEVICE_ID
            kwargs = ACCESS_INFO
            rh = RestHandler(**kwargs)
            rh.get_device_id()

    def test_get_storage(self):
        RestHandler.get_system_info = mock.Mock(return_value=GET_DEVICE_ID)
        RestHandler.get_rest_info = mock.Mock(
            side_effect=[GET_ALL_POOLS, GET_SPECIFIC_STORAGE])
        storage = self.driver.get_storage(context)
        self.assertDictEqual(storage, storage_result)

    def test_list_storage_pools(self):
        RestHandler.get_rest_info = mock.Mock(return_value=GET_ALL_POOLS)
        pool = self.driver.list_storage_pools(context)
        self.assertDictEqual(pool[0], pool_result[0])

    def test_list_volumes(self):
        RestHandler.get_rest_info = mock.Mock(return_value=GET_ALL_VOLUMES)
        volume = self.driver.list_volumes(context)
        self.assertDictEqual(volume[0], volume_result[0])

    def test_list_alerts(self):
        with self.assertRaises(Exception) as exc:
            RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO)
            self.driver.list_alerts(context)
        self.assertEqual('list_alerts is not supported in model VSP F1500',
                         str(exc.exception))

    def test_parse_queried_alerts(self):
        alert_list = []
        HitachiVspDriver.parse_queried_alerts(ALERT_INFO, alert_list)
        self.assertEqual(alert_list[0].get('alert_id'),
                         alert_result[0].get('alert_id'))

    def test_parse_alert(self):
        trap_alert = self.driver.parse_alert(context, TRAP_INFO)
        trap_alert_result['occur_time'] = trap_alert['occur_time']
        self.assertEqual(trap_alert, trap_alert_result)

    @mock.patch.object(RestHandler, 'call_with_token')
    def test_get_token(self, mock_token):
        with self.assertRaises(Exception) as exc:
            mock_token.return_value = mock.MagicMock(
                status_code=403, text='KART30005-E')
            self.driver.rest_handler.get_token()
        self.assertEqual('Exception from Storage Backend: KART30005-E.',
                         str(exc.exception))
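
    # NOTE: stacked @mock.patch.object decorators are applied bottom-up, so
    # the decorator closest to each test method supplies the first mock
    # argument.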
    @mock.patch.object(RestHandler, 'get_controllers')
    def test_list_controllers(self, mock_controller):
        RestHandler.login = mock.Mock(return_value=None)
        mock_controller.return_value = GET_ALL_CONTROLLERS
        controller = HitachiVspDriver(**ACCESS_INFO).list_controllers(context)
        self.assertEqual(controller, controller_result)

    @mock.patch.object(RestHandler, 'get_disks')
    def test_list_disks(self, mock_disk):
        RestHandler.login = mock.Mock(return_value=None)
        mock_disk.return_value = GET_ALL_DISKS
        disk = HitachiVspDriver(**ACCESS_INFO).list_disks(context)
        self.assertEqual(disk, disk_result)

    @mock.patch.object(RestHandler, 'get_all_ports')
    @mock.patch.object(RestHandler, 'get_detail_ports')
    def test_list_ports(self, mock_detail, mock_all):
        RestHandler.login = mock.Mock(return_value=None)
        mock_all.return_value = GET_ALL_PORTS
        mock_detail.return_value = GET_DETAIL_PORT
        port = HitachiVspDriver(**ACCESS_INFO).list_ports(context)
        self.assertEqual(port, port_result)

    @mock.patch.object(RestHandler, 'get_specific_host_group')
    @mock.patch.object(RestHandler, 'get_all_host_groups')
    @mock.patch.object(RestHandler, 'get_host_wwn')
    def test_host_initiators(self, mock_wwn, mock_groups, mock_group):
        RestHandler.login = mock.Mock(return_value=None)
        mock_groups.return_value = GET_ALL_GROUPS
        mock_group.return_value = GET_SINGLE_WWN_GROUP
        mock_wwn.return_value = GET_HOST_WWN
        initiators = HitachiVspDriver(
            **ACCESS_INFO).list_storage_host_initiators(context)
        self.assertEqual(initiators, initiator_result)

    @mock.patch.object(RestHandler, 'get_specific_host_group')
    @mock.patch.object(RestHandler, 'get_all_host_groups')
    @mock.patch.object(RestHandler, 'get_iscsi_name')
    def test_hosts(self, mock_iscsi, mock_groups, mock_group):
        RestHandler.login = mock.Mock(return_value=None)
        mock_groups.return_value = GET_ALL_GROUPS
        mock_group.return_value = GET_SINGLE_ISCSI_GROUP
        mock_iscsi.return_value = GET_HOST_ISCSI
        hosts = HitachiVspDriver(**ACCESS_INFO).list_storage_hosts(context)
        self.assertEqual(hosts, host_result)

    @mock.patch.object(RestHandler, 'get_all_host_groups')
    @mock.patch.object(RestHandler, 'get_lun_path')
    def test_masking_views(self, mock_view, mock_groups):
        RestHandler.login = mock.Mock(return_value=None)
        mock_groups.return_value = GET_ALL_GROUPS
        mock_view.return_value = GET_LUN_PATH
        views = HitachiVspDriver(**ACCESS_INFO).list_masking_views(context)
        self.assertEqual(views, view_result)

    @mock.patch.object(RestHandler, 'get_specific_host_group')
    @mock.patch.object(RestHandler, 'get_all_host_groups')
    @mock.patch.object(RestHandler, 'get_iscsi_name')
    def test_host_groups(self, mock_iscsi, mock_groups, mock_group):
        RestHandler.login = mock.Mock(return_value=None)
        mock_groups.return_value = GET_ALL_GROUPS
        mock_group.return_value = GET_SINGLE_ISCSI_GROUP
        mock_iscsi.return_value = GET_HOST_ISCSI
        groups = \
            HitachiVspDriver(**ACCESS_INFO).list_storage_host_groups(context)
        self.assertEqual(groups, groups_result)


================================================
FILE: delfin/tests/unit/drivers/hpe/__init__.py
================================================


================================================
FILE: delfin/tests/unit/drivers/hpe/hpe_3par/__init__.py
================================================


================================================
FILE: delfin/tests/unit/drivers/hpe/hpe_3par/test_hpe_3parstor.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from unittest import TestCase, mock import paramiko from delfin.common import constants sys.modules['delfin.cryptor'] = mock.Mock() from delfin import exception from delfin import context from delfin.drivers.hpe.hpe_3par.hpe_3parstor import Hpe3parStorDriver from delfin.drivers.hpe.hpe_3par.alert_handler import AlertHandler from delfin.drivers.hpe.hpe_3par.rest_handler import RestHandler from delfin.drivers.hpe.hpe_3par.ssh_handler import SSHHandler from delfin.drivers.utils.rest_client import RestClient from delfin.drivers.utils.ssh_client import SSHPool from requests import Session class Request: def __init__(self): self.environ = {'delfin.context': context.RequestContext()} pass ACCESS_INFO = { "storage_id": "12345", "vendor": "hpe", "model": "3par", "rest": { "host": "10.0.0.1", "port": 8443, "username": "user", "password": "cGFzc3dvcmQ=" }, "ssh": { "host": "110.143.132.231", "port": 22, "username": "user", "password": "cGFzc3dvcmQ=" } } NODE_DATAS = """ Control Data Cache Node --Name--- -State-- Master IC SLED LED Mem(MB) Mem(MB) Available(%) 0 1307327-0 Degraded Yes Yes unknown AmberBlnk 4096 6144 0 1 1307327-1 Degraded No Yes unknown AmberBlnk 4096 6144 0 """ NODE_CPU_DATAS = """ ----------------------------CPUs---------------------------- Node CPU -Manufacturer- -Serial- CPUSpeed(MHz) BusSpeed(MHz) 0 0 GenuineIntel -- 2327 1334.57 0 1 GenuineIntel -- 2327 1334.57 0 2 GenuineIntel -- 2327 1334.57 0 3 GenuineIntel -- 2327 1334.57 1 0 GenuineIntel -- 2327 1332.19 1 1 GenuineIntel -- 2327 1332.19 1 2 GenuineIntel -- 2327 1332.19 1 3 GenuineIntel -- 2327 1332.19 """ NODE_VERSION = """ Node: 0 -------- System serial: 1000183 BIOS version: 4.8.34 OS version: 3.2.2.204 Reset reason: Unknown Node: 1 -------- BIOS version: 4.8.34 OS version: 3.2.2.204 Reset reason: Unknown """ DISK_DATAS = """ ---Size(MB)--- ----Ports---- Id CagePos Type RPM State Total Free A B Cap(GB) 0 0:14:0 FC 15 degraded 571904 83968 0:2:2* ----- 600 1 0:1:0 FC 15 degraded 571904 62720 0:2:2* ----- 600 ----------------------------------------------------------------- 16 total 9150464 912896 """ DISK_I_DATAS = """ Id CagePos State Node_WWN MFR Model Serial FW_Rev Protocol MediaType AdminTime 0 0:14:0 degraded WWN11 MFR111 Model11 Serial111 FW_Rev111 Pl MT1 600 1 0:1:0 degraded WWN22 MFR2222 Model22 Serial222 FW_Rev222 P2 MT2 600 """ PORT_DATAS = """ N:S:P Mode State -Node_WWN- -Port_WWN/HW_Addr- Type Protocol Label Ptner FState 0:0:1 target ready 2FF70002AC001C9F 20010002AC001C9F host FC - 1:0:1 none 0:0:2 target loss_sync 2FF70002AC001C9F 20020002AC001C9F free FC - - - 0:2:2 target loss_sync 2FF70002AC001C9F 20020002AC001C9F free FC - - - 0:6:1 target loss_sync 2FF70002AC001C9F 20020002AC001C9F free FC - - - -------------------------------------------------------------------------- 18 """ PORT_I_DATAS = """ N:S:P Brand Model Rev Firmware Serial HWType 0:0:1 LSI 9205-8e 01 17.11.00.00 SP12430085 SAS 0:0:2 LSI 9205-8e 01 17.11.00.00 SP12430085 FC 0:1:1 QLOGIC QLE2672 02 8.1.1 RFE1228G50820 FC 0:1:2 QLOGIC QLE2672 02 8.1.1 RFE1228G50820 FC 0:2:1 QLOGIC QLE8242 58 4.15.2 PCGLTX0RC1G3PX CNA """ PORT_PER_DATAS = 
""" N:S:P Connmode ConnType CfgRate MaxRate Class2 UniqNodeWwn VCN Il TMWO SSAN 0:0:1 disk point 6Gbps 6Gbps n/a n/a n/a enabled n/a n/a 0:0:2 disk point 6Gbps 6Gbps n/a n/a n/a enabled n/a n/a 0:1:1 host point auto 16Gbps disabled disabled disabled enabled disabled n/a 0:1:2 host point auto 16Gbps disabled disabled disabled enabled disabled n/a """ PORT_ISCSI_DATAS = """ N:S:P State IPAddr Netmask/PrefixLen Gateway TPGT MTU Rate iAddr iPort ST VLAN 0:2:1 ready 1df9:7b7b:790::21 64 :: 21 1500 10Gbps :: 3205 21 Y 0:2:2 ready 10.99.1.3 255.255.255.0 0.0.0.0 22 1500 10Gbps 0.0.0.0 3205 22 Y """ PORT_RCIP_DATAS = """ N:S:P State ---HwAddr--- IPAddr Netmask Gateway MTU Rate Duplex AutoNeg 0:6:1 loss_sync 0002AC684AAD 10.11.35.10 255.255.0.0 10.11.0.1 900 n/a n/a n/a 1:6:1 offline 0002AC6A3A0F - - - - n/a n/a n/a ----------------------------------------------------------------------------- 2 """ PORT_C_DATAS = """ N:S:P Mode Device Pos Config Topology Rate Cls Mode_change 0:0:1 target RedHat_196 0 valid fabric 8Gbps 3 allowed RedHat_196 0 valid fabric 8Gbps 3 allowe 0:0:2 target Dorado5000V3_F1 0 valid fabric 8Gbps 3 allowed Dorado5000V3_F1 0 valid fabric 8Gbps 3 allowed -------------------------------------------------------------------------- 108 """ POOL_DATAS = ret = { "total": 12, "members": [ { "id": 0, "uuid": "aa43f218-d3dd-4626-948f-8a160b0eac1d", "name": "Lcltest333", "numFPVVs": 21, "numTPVVs": 25, "UsrUsage": { "totalMiB": 1381504, "rawTotalMiB": 1842004, "usedMiB": 1376128, "rawUsedMiB": 712703 }, "SAUsage": { "totalMiB": 140800, "rawTotalMiB": 422400, "usedMiB": 5120, "rawUsedMiB": 15360 }, "SDUsage": { "totalMiB": 388736, "rawTotalMiB": 518315, "usedMiB": 0, "rawUsedMiB": 0 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 4, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] }, { "id": 1, "uuid": "c392910e-7648-4972-b594-47dd3d28f3ec", "name": "cpg_Migration1", "numFPVVs": 14, "numTPVVs": 319, "UsrUsage": { "totalMiB": 1418752, "rawTotalMiB": 1702500, "usedMiB": 1417984, "rawUsedMiB": 568934 }, "SAUsage": { "totalMiB": 56832, "rawTotalMiB": 170496, "usedMiB": 42752, "rawUsedMiB": 128256 }, "SDUsage": { "totalMiB": 187648, "rawTotalMiB": 225179, "usedMiB": 157184, "rawUsedMiB": 188620 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 6, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] }, { "id": 2, "uuid": "c392910e-7648-4972-b594-47dd3d28f3ec", "name": "cpg_Oracle", "numFPVVs": 14, "numTPVVs": 319, "UsrUsage": { "totalMiB": 1418752, "rawTotalMiB": 1702500, "usedMiB": 1417984, "rawUsedMiB": 568934 }, "SAUsage": { "totalMiB": 56832, "rawTotalMiB": 170496, "usedMiB": 42752, "rawUsedMiB": 128256 }, "SDUsage": { "totalMiB": 187648, "rawTotalMiB": 225179, "usedMiB": 157184, "rawUsedMiB": 188620 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 6, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] }, { 
"id": 3, "uuid": "c392910e-7648-4972-b594-47dd3d28f3ec", "name": "cpg_filesystem", "numFPVVs": 14, "numTPVVs": 319, "UsrUsage": { "totalMiB": 1418752, "rawTotalMiB": 1702500, "usedMiB": 1417984, "rawUsedMiB": 568934 }, "SAUsage": { "totalMiB": 56832, "rawTotalMiB": 170496, "usedMiB": 42752, "rawUsedMiB": 128256 }, "SDUsage": { "totalMiB": 187648, "rawTotalMiB": 225179, "usedMiB": 157184, "rawUsedMiB": 188620 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 6, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] }, { "id": 4, "uuid": "c392910e-7648-4972-b594-47dd3d28f3ec", "name": "cpg_test", "numFPVVs": 14, "numTPVVs": 319, "UsrUsage": { "totalMiB": 1418752, "rawTotalMiB": 1702500, "usedMiB": 1417984, "rawUsedMiB": 568934 }, "SAUsage": { "totalMiB": 56832, "rawTotalMiB": 170496, "usedMiB": 42752, "rawUsedMiB": 128256 }, "SDUsage": { "totalMiB": 187648, "rawTotalMiB": 225179, "usedMiB": 157184, "rawUsedMiB": 188620 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 6, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] }, { "id": 5, "uuid": "c392910e-7648-4972-b594-47dd3d28f3ec", "name": "fs_cpg", "numFPVVs": 14, "numTPVVs": 319, "UsrUsage": { "totalMiB": 1418752, "rawTotalMiB": 1702500, "usedMiB": 1417984, "rawUsedMiB": 568934 }, "SAUsage": { "totalMiB": 56832, "rawTotalMiB": 170496, "usedMiB": 42752, "rawUsedMiB": 128256 }, "SDUsage": { "totalMiB": 187648, "rawTotalMiB": 225179, "usedMiB": 157184, "rawUsedMiB": 188620 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 6, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] }, { "id": 6, "uuid": "c392910e-7648-4972-b594-47dd3d28f3ec", "name": "ljn2", "numFPVVs": 14, "numTPVVs": 319, "UsrUsage": { "totalMiB": 1418752, "rawTotalMiB": 1702500, "usedMiB": 1417984, "rawUsedMiB": 568934 }, "SAUsage": { "totalMiB": 56832, "rawTotalMiB": 170496, "usedMiB": 42752, "rawUsedMiB": 128256 }, "SDUsage": { "totalMiB": 187648, "rawTotalMiB": 225179, "usedMiB": 157184, "rawUsedMiB": 188620 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 6, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] }, { "id": 7, "uuid": "c392910e-7648-4972-b594-47dd3d28f3ec", "name": "ljn4_xiuGai", "numFPVVs": 14, "numTPVVs": 319, "UsrUsage": { "totalMiB": 1418752, "rawTotalMiB": 1702500, "usedMiB": 1417984, "rawUsedMiB": 568934 }, "SAUsage": { "totalMiB": 56832, "rawTotalMiB": 170496, "usedMiB": 42752, "rawUsedMiB": 128256 }, "SDUsage": { "totalMiB": 187648, "rawTotalMiB": 225179, "usedMiB": 157184, "rawUsedMiB": 188620 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { 
"RAIDType": 3, "HA": 3, "setSize": 6, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] }, { "id": 8, "uuid": "c392910e-7648-4972-b594-47dd3d28f3ec", "name": "ljn_330", "numFPVVs": 14, "numTPVVs": 319, "UsrUsage": { "totalMiB": 1418752, "rawTotalMiB": 1702500, "usedMiB": 1417984, "rawUsedMiB": 568934 }, "SAUsage": { "totalMiB": 56832, "rawTotalMiB": 170496, "usedMiB": 42752, "rawUsedMiB": 128256 }, "SDUsage": { "totalMiB": 187648, "rawTotalMiB": 225179, "usedMiB": 157184, "rawUsedMiB": 188620 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 6, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] }, { "id": 9, "uuid": "c392910e-7648-4972-b594-47dd3d28f3ec", "name": "xulin_cpg1", "numFPVVs": 14, "numTPVVs": 319, "UsrUsage": { "totalMiB": 1418752, "rawTotalMiB": 1702500, "usedMiB": 1417984, "rawUsedMiB": 568934 }, "SAUsage": { "totalMiB": 56832, "rawTotalMiB": 170496, "usedMiB": 42752, "rawUsedMiB": 128256 }, "SDUsage": { "totalMiB": 187648, "rawTotalMiB": 225179, "usedMiB": 157184, "rawUsedMiB": 188620 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 6, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] }, { "id": 10, "uuid": "c392910e-7648-4972-b594-47dd3d28f3ec", "name": "zyz", "numFPVVs": 14, "numTPVVs": 319, "UsrUsage": { "totalMiB": 1418752, "rawTotalMiB": 1702500, "usedMiB": 1417984, "rawUsedMiB": 568934 }, "SAUsage": { "totalMiB": 56832, "rawTotalMiB": 170496, "usedMiB": 42752, "rawUsedMiB": 128256 }, "SDUsage": { "totalMiB": 187648, "rawTotalMiB": 225179, "usedMiB": 157184, "rawUsedMiB": 188620 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 6, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] }, { "id": 11, "uuid": "c392910e-7648-4972-b594-47dd3d28f3ec", "name": "22", "numFPVVs": 14, "numTPVVs": 319, "UsrUsage": { "totalMiB": 1418752, "rawTotalMiB": 1702500, "usedMiB": 1417984, "rawUsedMiB": 568934 }, "SAUsage": { "totalMiB": 56832, "rawTotalMiB": 170496, "usedMiB": 42752, "rawUsedMiB": 128256 }, "SDUsage": { "totalMiB": 187648, "rawTotalMiB": 225179, "usedMiB": 157184, "rawUsedMiB": 188620 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 6, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] } ] } POOL_METRICS_DATAS = { "sampleTime": "2020-03-01T03:50:00+08:00", "sampleTimeSec": 1583005800, "total": 2, "members": [ { "name": "22", "IO": { "read": 0, "write": 0, "total": 10 }, "KBytes": { "read": 0, "write": 0, "total": 0 }, "serviceTimeMS": { "read": 0, "write": 0, "total": 0 }, "IOSizeKB": { "read": 0, "write": 0, "total": 0 }, "queueLength": 0, "busyPct": 0 }, { "name": 
"Lcltest333", "IO": { "read": 0, "write": 0, "total": 20 }, "KBytes": { "read": 0, "write": 0, "total": 0 }, "serviceTimeMS": { "read": 0, "write": 0, "total": 0 }, "IOSizeKB": { "read": 0, "write": 0, "total": 0 }, "queueLength": 0, "busyPct": 0 } ] } PORT_METRICS_DATAS = """ Time: 2021-07-14 14:10:00 CST (1626243000) ----IO/s----- ---KBytes/s---- ----Svct ms----- -IOSz KBytes- PORT_N PORT_S PORT_P Rd Wr Tot Rd Wr Tot Rd Wr Tot Rd Wr Tot QLen AvgBusy% 0 0 1 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0.00 0.0 0.0 0.0 0 0.0 0 1 1 0.0 14.3 14.3 0.0 86.4 86.4 0.00 11.52 11.52 0.0 6.1 6.1 1 11.9 ---------------------------------------------------------------------------- 2 7.6 31.4 39.0 0.6 192.0 192.6 0.00 12.34 9.93 0.1 6.2 5.0 1 3.0 """ DISK_METRICS_DATAS = """ Time: 2021-07-14 15:35:00 CST (1626248100) ----IO/s----- ---KBytes/s---- ----Svct ms----- -IOSz KBytes- PDID Rd Wr Tot Rd Wr Tot Rd Wr Tot Rd Wr Tot QLen AvgBusy% 0 0.0 0.5 0.5 0.0 4.9 4.9 0.00 3.04 3.04 0.0 10.0 10.0 0 0.1 1 0.0 1.6 1.6 0.0 10.2 10.2 0.00 0.89 0.89 0.0 6.3 6.3 0 0.1 ------------------------------------------------------------------------------- 2 0.0 31.4 31.4 0.0 191.4 191.4 0.00 11.98 11.98 0.0 6.2 6.2 0 1.5 """ VOLUME_METRICS_DATAS = """ Time: 2021-07-14 14:10:00 CST (1626243000) ----IO/s----- ---KBytes/s---- ----Svct ms----- -IOSz KBytes- VVID VV_NAME Rd Wr Tot Rd Wr Tot Rd Wr Tot Rd Wr Tot QLen AvgBusy% 0 srdata 0.0 1.0 2.0 3.0 11.0 22.0 33.00 111.00 222.00 333.0 0.0 0.0 0 0.0 1 admin 0.0 14.3 14.3 0.0 86.4 86.4 0.00 11.52 11.52 0.0 6.1 6.1 1 11.9 ---------------------------------------------------------------------------- 2 7.6 31.4 39.0 0.6 192.0 192.6 0.00 12.34 9.93 0.1 6.2 5.0 1 3.0 """ HOST_GROUP_DATAS = """ Id Name Members Comment 194 HostSet_VMware Host_ESXi6.5_125 -- 229 HostSet_Suse11_Oracle Host_Suse11_8.44.75.122 -- 257 HostGroup_ESX6.0 ESX6.0_8.44.75.145 -- ESX6.0_8.44.75.146 264 HostSet_Win2016_WSFC RH2288V5_Win2016_node2 -- RH2288V5_Win2016_node1 266 HostSet_Win2012_WSFC RH2285_Win2012_wsfc1 -- Rh2285_Win2012_wsfc2 268 HostSet_AIX Host_AIX_51.10.192.20 -- 270 HostSet_Suse11 Host_Suse11_8.44.75.123 -- 274 Suse11sp4_150 litng138.150 -- ----------------------------------------------------------- 32 total 28 """ HOST_ID_DATAS = """ Id Name Persona -WWN/iSCSI_Name- Port IP_addr 175 Host_ESXi6.5_125 Generic 2408244427906812 --- n/a 54 Doradov3_lm Generic 2418244427906812 --- n/a 57 AIX_wenbin AIX-legacy 10000000C9E74BCC --- n/a 65 SKY-ESXI60 Generic 2100001B321BE0FF --- n/a 65 SKY-ESXI60 Generic 2101001B323BE0FF --- n/a 67 zouming Generic 2012E4A8B6B0A1CC --- n/a 67 zouming Generic 2002E4A8B6B0A1CC --- n/a 68 powerpath Generic 21000024FF36D406 --- n/a 68 powerpath Generic 21000024FF36D407 --- n/a 69 power_v3 Generic 20809CE37435D845 --- n/a 69 power_v3 Generic 20909CE37435D845 --- n/a 89 vplex_meta_important Generic 5000144280292012 0:1:2 n/a 89 vplex_meta_important Generic 5000144280292010 0:1:2 n/a 89 vplex_meta_important Generic 5000144290292012 1:1:2 n/a 89 vplex_meta_important Generic 500014429029E910 1:1:2 n/a 89 vplex_meta_important Generic 500014429029E912 1:1:2 n/a 89 vplex_meta_important Generic 500014428029E912 1:1:2 n/a 89 vplex_meta_important Generic 500014428029E910 1:1:2 n/a 89 vplex_meta_important Generic 5000144290292010 1:1:2 n/a 89 vplex_meta_important Generic 5000144290292012 0:1:2 n/a 89 vplex_meta_important Generic 5000144290292010 0:1:2 n/a 89 vplex_meta_important Generic 500014429029E912 0:1:2 n/a 89 vplex_meta_important Generic 500014429029E910 0:1:2 n/a 89 vplex_meta_important Generic 
5000144280292012 1:1:2 n/a 89 vplex_meta_important Generic 5000144280292010 1:1:2 n/a 89 vplex_meta_important Generic 500014428029E912 0:1:2 n/a 89 vplex_meta_important Generic 500014428029E910 0:1:2 n/a 91 Dorado5000_51.45 Generic 200080D4A58EA53A --- n/a 91 Dorado5000_51.45 Generic 201080D4A58EA53A --- n/a 98 AIX6.1_LN AIX-legacy 10000000C9781C57 --- n/a 98 AIX6.1_LN AIX-legacy 10000000C9781853 --- n/a 115 huhuihost Generic 2100000E1E1A9B30 --- n/a 121 Dorado5000V3_F3 Generic 201880D4A58EA53A --- n/a 160 host002 Generic 21000024FF41DCF8 --- n/a -- -- -- 21000024FF41DCF7 1:0:2 n/a -- -- -- 21000024FF41DCF6 1:0:2 n/a -- -- -- 21000024FF0CC6CA 0:1:2 n/a -- -- -- 21000024FF0CC6CA 1:1:2 n/a -- -- -- 21000024FF0CBF47 0:1:2 n/a -- -- -- 21000024FF0CBF47 1:1:2 n/a """ VOLUME_GROUP_DATAS = """ Id Name Members Comment 91 wcj_2 wcj_2.0 -- wcj_2.1 wcj_2.2 wcj_2.3 110 HP-Esxi-LUNSet -- -- 124 zhangjun -- -- 126 wcj_1 wcj_1.1 -- 127 wcj_3 wcj_3.0 -- wcj_3.1 128 IBM_SVC -- -- 129 zyz_3parF200_ zyz_3parF200.0 -- zyz_3parF200.1 zyz_3parF200.2 zyz_3parF200.3 130 zyz zyz_2 -- 131 tx -- -- 132 tx9 -- -- 133 wcj_hp_1 -- -- 136 AIX_YG_WYK_LUN AIX_YG_WYK_LUN.0 -- AIX_YG_WYK_LUN.1 AIX_YG_WYK_LUN.2 AIX_YG_WYK_LUN.3 140 st11 -- -- 146 Solaris_lun_group Solaris_LUN1_13G -- solaris_LUN_2_33G 147 wcj_vplex wcj_vplex.0 -- ----------------------------------------------------------- 32 total 28 """ VOLUME_ID_DATAS = """ Id Name Prov Type CopyOf BsId Rd -Detailed_State- Adm Snp Usr VSize 4836 wcj_2.0 tpvv base --- 4836 RW normal 256 512 512 5120 4798 zyz_2 tpvv base --- 4836 RW normal 256 512 512 5120 4797 wcj_3.1 tpvv base --- 4836 RW normal 256 512 512 5120 666 yytest_vv_001 tpvv base --- 4836 RW normal 256 512 512 5120 ------------------------------------------------------------------------ 409 total 51072 158720 3279488 18186240 """ HOST_DATAS = [ { "total": 38, "members": [ { "id": 54, "name": "Doradov3_lm", "descriptors": { "location": "U9-3-B17R_B7", "IPAddr": "100.157.61.100", "os": "ESXI6.0", "model": "RH2288H V3" }, "FCPaths": [ { "wwn": "2408244427906812", "hostSpeed": 0 }, { "wwn": "2418244427906812", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 57, "name": "AIX_wenbin", "FCPaths": [ { "wwn": "10000000C9E74BCC", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 5, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 65, "name": "SKY-ESXI60", "descriptors": { "location": "U9-3-B17R_B7", "IPAddr": "100.157.61.100", "os": "ESXI6.0", "model": "RH2288H V3" }, "FCPaths": [ { "wwn": "2100001B321BE0FF", "hostSpeed": 0 }, { "wwn": "2101001B323BE0FF", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 67, "name": "zouming", "FCPaths": [ { "wwn": "2012E4A8B6B0A1CC", "hostSpeed": 0 }, { "wwn": "2002E4A8B6B0A1CC", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 68, "name": "powerpath", "FCPaths": [ { "wwn": "21000024FF36D406", "hostSpeed": 0 }, { "wwn": "21000024FF36D407", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 69, "name": "power_v3", "FCPaths": [ { "wwn": "20809CE37435D845", "hostSpeed": 0 }, { "wwn": "20909CE37435D845", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 89, "name": "vplex_meta_important", "FCPaths": [ { "wwn": 
"5000144280292012", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144280292010", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144290292012", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014429029E910", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014429029E912", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014428029E912", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014428029E910", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144290292010", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144290292012", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144290292010", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014429029E912", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014429029E910", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144280292012", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144280292010", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014428029E912", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014428029E910", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 91, "name": "Dorado5000_51.45", "FCPaths": [ { "wwn": "200080D4A58EA53A", "hostSpeed": 0 }, { "wwn": "201080D4A58EA53A", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 98, "name": "AIX6.1_LN", "descriptors": { "os": "AIX" }, "FCPaths": [ { "wwn": "10000000C9781C57", "hostSpeed": 0 }, { "wwn": "10000000C9781853", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 5, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 115, "name": "huhuihost", "descriptors": { "os": "SuSE" }, "FCPaths": [ { "wwn": "2100000E1E1A9B30", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 121, "name": "Dorado5000V3_F3", "descriptors": { "os": "Red Hat Enterprise Linux" }, "FCPaths": [ { "wwn": "201880D4A58EA53A", "hostSpeed": 0 }, { "wwn": "200380D4A58EA53A", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 122, "name": "DYP_RHEL", "descriptors": { "IPAddr": "100.157.18.22", "os": "Red Hat Enterprise Linux" }, "FCPaths": [ { "wwn": "10000090FA76D446", "hostSpeed": 0 }, { "wwn": "10000090FA76D447", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 123, "name": "DYP_Dorado6000", "FCPaths": [ { "wwn": "2618346AC212FB94", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 124, "name": "tool_rhel6.8", "FCPaths": [ { "wwn": "21000024FF543687", "hostSpeed": 0 }, { "wwn": "21000024FF543686", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 125, "name": "OceanStor6800", "FCPaths": [ { "wwn": "2430E0979656725A", "hostSpeed": 0 }, { "wwn": 
"2208E0979656725A", "hostSpeed": 0 }, { "wwn": "2218E0979656725A", "hostSpeed": 0 }, { "wwn": "2428E0979656725A", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 126, "name": "fyc_test", "FCPaths": [ { "wwn": "21000024FF41DE7E", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 127, "name": "huhui", "descriptors": { "os": "SuSE" }, "FCPaths": [ { "wwn": "500601610864241E", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 132, "name": "ESX8.44.161.152", "descriptors": { "os": "ESX 4.x/5.x" }, "FCPaths": [ { "wwn": "21000024FF2F3266", "hostSpeed": 0 }, { "wwn": "21000024FF2F3267", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 8, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 133, "name": "ESX89PT_suse_8.44.190.111", "descriptors": { "os": "SuSE" }, "FCPaths": [ { "wwn": "21000024FF36F1ED", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 134, "name": "SVC", "descriptors": { "os": "Exanet" }, "FCPaths": [ { "wwn": "500507680110EF7C", "hostSpeed": 0 }, { "wwn": "500507680120EF7C", "hostSpeed": 0 }, { "wwn": "500507680120EF3E", "hostSpeed": 0 }, { "wwn": "500507680110EF3E", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 3, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 135, "name": "NSS_8.44.162.50", "descriptors": { "os": "Red Hat Enterprise Linux" }, "FCPaths": [ { "wwn": "21000024FF0DC381", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 137, "name": "D185_8.44.143.201", "descriptors": { "os": "Red Hat Enterprise Linux" }, "FCPaths": [ { "wwn": "29A11603042D0306", "hostSpeed": 0 }, { "wwn": "28D01603042D0306", "hostSpeed": 0 }, { "wwn": "2903010203040509", "hostSpeed": 0 }, { "wwn": "2802010203040509", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 139, "name": "Dorado3000V6", "FCPaths": [ { "wwn": "2019CC64A68314D3", "hostSpeed": 0 }, { "wwn": "2009CC64A68314D3", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 141, "name": "8.44.143.27T2", "FCPaths": [ { "wwn": "10000090FA50C4DF", "hostSpeed": 0 }, { "wwn": "10000090FA50C4DE", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 142, "name": "8.44.143.27T1", "FCPaths": [], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 144, "name": "C61_51.10.58.190", "descriptors": { "os": "Red Hat Enterprise Linux" }, "FCPaths": [ { "wwn": "2210112224901223", "hostSpeed": 0 }, { "wwn": "2200112224901223", "hostSpeed": 0 }, { "wwn": "2230112224901223", "hostSpeed": 0 }, { "wwn": "2220112224901223", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 145, "name": "8.44.43.19", "FCPaths": [ { "wwn": "21000024FF754606", "hostSpeed": 0 }, { "wwn": "21000024FF1A99E1", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 146, "name": "ZTY_win2012", "descriptors": { "os": "Windows 2012" }, "FCPaths": [ { "wwn": "21000024FF40272B", "portPos": { "node": 1, "slot": 
1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "21000024FF40272A", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 2, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 147, "name": "DoradoV6_183", "FCPaths": [ { "wwn": "240B121314151617", "hostSpeed": 0 }, { "wwn": "2409121314151617", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 148, "name": "rhev_125", "descriptors": { "os": "Windows 2012" }, "FCPaths": [ { "wwn": "21000024FF4BC1B7", "hostSpeed": 0 }, { "wwn": "21000024FF4BC1B6", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 2, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 150, "name": "windows2012_68", "descriptors": { "os": "Windows 2012" }, "FCPaths": [ { "wwn": "2101001B32B0667A", "hostSpeed": 0 }, { "wwn": "2100001B3290667A", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 2, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 151, "name": "Dorado5000V6_80", "FCPaths": [ { "wwn": "2001183D5E0F5131", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "2011183D5E0F5131", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 152, "name": "windows2012_60", "descriptors": { "os": "Windows 2012" }, "FCPaths": [ { "wwn": "21000024FF53B4BC", "hostSpeed": 0 }, { "wwn": "21000024FF53B4BD", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 2, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 153, "name": "aix_8.44.134.204", "descriptors": { "os": "AIX" }, "FCPaths": [ { "wwn": "10000000C975804C", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "10000000C9765E79", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 5, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 154, "name": "Dorado5500_V6_109", "descriptors": { "IPAddr": "8.44.133.82", "os": "Windows 2012" }, "FCPaths": [ { "wwn": "221818022D189653", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "220818022D189653", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 2, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 155, "name": "aix134.205", "descriptors": { "IPAddr": "8.44.134.205", "os": "AIX" }, "FCPaths": [ { "wwn": "20000000C9781C81", "hostSpeed": 0 }, { "wwn": "10000000C9781C0C", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 5, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 158, "name": "hsv6", "FCPaths": [ { "wwn": "28130A2B304438A8", "hostSpeed": 0 }, { "wwn": "28120A2B304438A8", "hostSpeed": 0 }, { "wwn": "28F20A2B304438A8", "hostSpeed": 0 }, { "wwn": "28F30A2B304438A8", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "FCPaths": [ { "wwn": "21000024FF41DCF7", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "21000024FF41DCF6", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "21000024FF0CC6CA", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "21000024FF0CC6CA", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "21000024FF0CBF47", "portPos": { "node": 0, "slot": 1, 
"cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "21000024FF0CBF47", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 } ], "iSCSIPaths": [], "initiatorChapEnabled": False, "targetChapEnabled": False } ] } ] VIEW_DATAS = """ Lun VVName HostName -Host_WWN/iSCSI_Name- Port Type 2 yytest_vv_001 host002 ---------------- 0:2:1 host 0 set:vvset001 set:hostset111 ---------------- 1:2:1 host set -------------------------------------------------------------------- 2 total """ CONTROLLER_RESULT = [ { 'name': '1307327-0', 'storage_id': '12345', 'native_controller_id': '0', 'status': 'degraded', 'location': None, 'soft_version': '3.2.2.204', 'cpu_info': '4 * 2327 MHz', 'cpu_count': 4, 'memory_size': '10737418240' }] DISK_RESULT = [ { 'name': '0:14:0', 'storage_id': '12345', 'native_disk_id': '0', 'serial_number': 'Serial111', 'manufacturer': 'MFR111', 'model': 'Model11', 'firmware': 'FW_Rev111', 'speed': 15000, 'capacity': 599684808704, 'status': 'degraded', 'physical_type': 'fc', 'logical_type': None, 'health_score': None, 'native_disk_group_id': None, 'location': '0:14:0' }] PORT_RESULT = [ { 'name': '0:0:1', 'storage_id': '12345', 'native_port_id': '0:0:1', 'location': '0:0:1', 'connection_status': 'connected', 'health_status': 'normal', 'type': 'sas', 'logical_type': None, 'speed': 8000000000, 'max_speed': 6000000000, 'native_parent_id': None, 'wwn': '20010002AC001C9F', 'mac_address': None, 'ipv4': None, 'ipv4_mask': None, 'ipv6': None, 'ipv6_mask': None }] METRICS_RESULT = [ constants.metric_struct(name='iops', labels={ 'storage_id': '12345', 'resource_type': 'storagePool', 'resource_id': '11', 'type': 'RAW', 'unit': 'IOPS'}, values={1583005800000: 10} ), constants.metric_struct(name='iops', labels={ 'storage_id': '12345', 'resource_type': 'volume', 'resource_id': '0', 'type': 'RAW', 'unit': 'IOPS'}, values={1626243000000: 2.0} ), constants.metric_struct(name='iops', labels={ 'storage_id': '12345', 'resource_type': 'port', 'resource_id': '0:0:1', 'type': 'RAW', 'unit': 'IOPS' }, values={1626243000000: 0.0} ), constants.metric_struct(name='iops', labels={ 'storage_id': '12345', 'resource_type': 'disk', 'resource_id': '0', 'type': 'RAW', 'unit': 'IOPS' }, values={1626248100000: 0.5} ), ] HOST_GROUP_RESULT = [ { 'name': 'HostSet_VMware', 'description': '', 'storage_id': '12345', 'native_storage_host_group_id': '194' }] VOLUME_GROUP_RESULT = [ { 'name': 'wcj_2', 'description': '', 'storage_id': '12345', 'native_volume_group_id': '91' }] PORT_GROUP_RESULT = [ { 'name': 'port_group_0:2:1', 'description': 'port_group_0:2:1', 'storage_id': '12345', 'native_port_group_id': 'port_group_0:2:1' }] HOST_RESULT = [ { 'name': 'Doradov3_lm', 'description': None, 'storage_id': '12345', 'native_storage_host_id': 54, 'os_type': 'VMware ESX', 'status': 'normal', 'ip_address': '100.157.61.100' }] INITIATOR_RESULT = [ { 'name': '2408244427906812', 'storage_id': '12345', 'native_storage_host_initiator_id': '2408244427906812', 'wwn': '2408244427906812', 'type': 'fc', 'status': 'online', 'native_storage_host_id': '175' }] VIEW_RESULT = [ { 'native_masking_view_id': '2_0:2:1_host002_yytest_vv_001', 'name': '2', 'storage_id': '12345', 'native_port_group_id': 'port_group_0:2:1', 'native_volume_id': '666', 'native_storage_host_id': '160' }] def create_driver(): kwargs = ACCESS_INFO SSHHandler.login = mock.Mock( return_value={"result": "success", "reason": "null"}) m = mock.MagicMock(status_code=201) with mock.patch.object(Session, 'post', return_value=m): m.raise_for_status.return_value = 201 m.json.return_value 
= { 'key': 'deviceid123ABC456' } return Hpe3parStorDriver(**kwargs) class TestHpe3parStorageDriver(TestCase): def test_a_init(self): kwargs = ACCESS_INFO SSHHandler.login = mock.Mock( return_value={""}) RestHandler.login = mock.Mock( return_value={""}) Hpe3parStorDriver(**kwargs) def test_b_initrest(self): m = mock.MagicMock() with mock.patch.object(Session, 'post', return_value=m): m.raise_for_status.return_value = 201 m.json.return_value = { 'key': '1&2F28CA9FC1EA0B8EAB80E9D8FD' } kwargs = ACCESS_INFO rc = RestClient(**kwargs) RestHandler(rc) def test_d_get_storage(self): driver = create_driver() expected = { 'name': 'hp3parf200', 'vendor': 'HPE', 'model': 'InServ F200', 'status': 'abnormal', 'serial_number': '1307327', 'firmware_version': '3.1.2.484', 'location': None, 'total_capacity': 7793486594048, 'raw_capacity': 9594956939264, 'used_capacity': 6087847706624, 'free_capacity': 1705638887424 } ret = { "id": 7327, "name": "hp3parf200", "systemVersion": "3.1.2.484", "IPv4Addr": "100.157.92.213", "model": "InServ F200", "serialNumber": "1307327", "totalNodes": 2, "masterNode": 0, "onlineNodes": [ 0, 1 ], "clusterNodes": [ 0, 1 ], "chunkletSizeMiB": 256, "totalCapacityMiB": 9150464, "allocatedCapacityMiB": 5805824, "freeCapacityMiB": 1626624, "failedCapacityMiB": 1718016, "timeZone": "Asia/Shanghai" } RestHandler.get_capacity = mock.Mock( return_value={ "allCapacity": { "totalMiB": 9150464, "allocated": { "system": { "totalSystemMiB": 1232384, "internalMiB": 303104, "spareMiB": 929280, "spareUsedMiB": 307456, "spareUnusedMiB": 621824 } } } } ) health_state = 'PDs that are degraded' SSHHandler.get_health_state = mock.Mock(return_value=health_state) m = mock.MagicMock(status_code=200) with mock.patch.object(RestHandler, 'call', return_value=m): m.raise_for_status.return_value = 200 m.json.return_value = ret storage = driver.get_storage(context) self.assertDictEqual(storage, expected) def test_e_list_storage_pools(self): driver = create_driver() expected = [ { 'name': 'test', 'storage_id': '12345', 'native_storage_pool_id': '0', 'description': 'Hpe 3par CPG:test', 'status': 'normal', 'storage_type': 'block', 'total_capacity': 2003870679040, 'subscribed_capacity': 2917892358144, 'used_capacity': 1448343502848, 'free_capacity': 555527176192 }, { 'name': 'cxd', 'storage_id': '12345', 'native_storage_pool_id': '1', 'description': 'Hpe 3par CPG:cxd', 'status': 'normal', 'storage_type': 'block', 'total_capacity': 1744025157632, 'subscribed_capacity': 2200095948800, 'used_capacity': 1696512081920, 'free_capacity': 47513075712 } ] ret = [ { "total": 2, "members": [ { "id": 0, "uuid": "aa43f218-d3dd-4626-948f-8a160b0eac1d", "name": "test", "numFPVVs": 21, "numTPVVs": 25, "UsrUsage": { "totalMiB": 1381504, "rawTotalMiB": 1842004, "usedMiB": 1376128, "rawUsedMiB": 712703 }, "SAUsage": { "totalMiB": 140800, "rawTotalMiB": 422400, "usedMiB": 5120, "rawUsedMiB": 15360 }, "SDUsage": { "totalMiB": 388736, "rawTotalMiB": 518315, "usedMiB": 0, "rawUsedMiB": 0 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 4, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] }, { "id": 1, "uuid": "c392910e-7648-4972-b594-47dd3d28f3ec", "name": "cxd", "numFPVVs": 14, "numTPVVs": 319, "UsrUsage": { "totalMiB": 1418752, "rawTotalMiB": 1702500, "usedMiB": 1417984, "rawUsedMiB": 568934 }, "SAUsage": 
{ "totalMiB": 56832, "rawTotalMiB": 170496, "usedMiB": 42752, "rawUsedMiB": 128256 }, "SDUsage": { "totalMiB": 187648, "rawTotalMiB": 225179, "usedMiB": 157184, "rawUsedMiB": 188620 }, "SAGrowth": { "incrementMiB": 8192, "LDLayout": { "HA": 3, "diskPatterns": [ { "diskType": 1 } ] } }, "SDGrowth": { "incrementMiB": 32768, "LDLayout": { "RAIDType": 3, "HA": 3, "setSize": 6, "chunkletPosPref": 1, "diskPatterns": [ { "diskType": 1 } ] } }, "state": 1, "failedStates": [], "degradedStates": [], "additionalStates": [] } ] } ] with mock.patch.object(RestHandler, 'get_resinfo_call', side_effect=ret): pools = driver.list_storage_pools(context) self.assertDictEqual(pools[0], expected[0]) self.assertDictEqual(pools[1], expected[1]) with mock.patch.object(RestHandler, 'get_all_pools', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_storage_pools(context) self.assertIn('An unknown exception occurred', str(exc.exception)) def test_f_list_volumes(self): driver = create_driver() expected = [{ 'name': 'admin', 'storage_id': '12345', 'description': None, 'status': 'normal', 'native_volume_id': '0', 'native_storage_pool_id': '', 'wwn': '50002AC000001C9F', 'type': 'thick', 'total_capacity': 10737418240, 'used_capacity': 10737418240, 'free_capacity': 0, 'compressed': True, 'deduplicated': True }] ret = [{ "members": [{ "id": 0, "name": "admin", "provisioningType": 1, "copyType": 1, "baseId": 0, "readOnly": False, "state": 1, "userSpace": { "reservedMiB": 10240, "rawReservedMiB": 20480, "usedMiB": 10240, "freeMiB": 0 }, "sizeMiB": 10240, "wwn": "50002AC000001C9F" }] }] pool_ret = { "members": [{ "id": 0, "uuid": "aa43f218-d3dd-4626-948f-8a160b0eac1d", "name": "test" }] } RestHandler.get_all_pools = mock.Mock(return_value=pool_ret) with mock.patch.object(RestHandler, 'get_resinfo_call', side_effect=ret): volumes = driver.list_volumes(context) self.assertDictEqual(volumes[0], expected[0]) def test_h_parse_alert(self): """ Success flow with all necessary parameters""" driver = create_driver() alert = { 'sysUpTime': '1399844806', 'snmpTrapOID': 'alertNotify', '1.3.6.1.4.1.12925.1.7.1.5.1': 'test_trap', '1.3.6.1.4.1.12925.1.7.1.6.1': 'This is a test trap', 'nodeID': '0', '1.3.6.1.4.1.12925.1.7.1.2.1': '6', '1.3.6.1.4.1.12925.1.7.1.3.1': 'test time', '1.3.6.1.4.1.12925.1.7.1.7.1': '89', '1.3.6.1.4.1.12925.1.7.1.8.1': '2555934', '1.3.6.1.4.1.12925.1.7.1.9.1': '5', 'serialNumber': '1307327', 'transport_address': '100.118.18.100', 'storage_id': '1c094309-70f2-4da3-ac47-e87cc1492ad5' } expected_alert_model = { 'alert_id': '0x027001e', 'alert_name': 'CPG growth non admin limit', 'severity': 'NotSpecified', 'category': 'Recovery', 'type': 'EquipmentAlarm', 'sequence_number': '89', 'description': 'This is a test trap', 'resource_type': 'Storage', 'location': 'test_trap', 'occur_time': '', 'clear_category': 'Automatic' } context = {} alert_model = driver.parse_alert(context, alert) # Verify that all other fields are matching self.assertDictEqual(expected_alert_model, alert_model) def test_list_alert(self): """ Success flow with all necessary parameters""" driver = create_driver() alert = """ Id : 1 State : New MessageCode : 0x2200de Time : 2015-07-17 20:14:29 PDT Severity : Degraded Type : Component state change Message : Node 0, Power Supply 1, Battery 0 Degraded Component: 100.118.18.100 """ expected_alert = [{ 'alert_id': '0x2200de', 'alert_name': 'Component state change', 'severity': 'Warning', 'category': 'Fault', 'type': 'EquipmentAlarm', 'sequence_number': '1', 
'occur_time': 1437135269000, 'description': 'Node 0, Power Supply 1, Battery 0 Degraded', 'resource_type': 'Storage', 'location': '100.118.18.100' }] SSHHandler.get_all_alerts = mock.Mock(return_value=alert) alert_list = driver.list_alerts(context, None) expected_alert[0]['occur_time'] = alert_list[0]['occur_time'] self.assertDictEqual(alert_list[0], expected_alert[0]) @mock.patch.object(AlertHandler, 'clear_alert') def test_clear_alert(self, mock_clear_alert): driver = create_driver() alert_id = '230584300921369' driver.clear_alert(context, alert_id) self.assertEqual(mock_clear_alert.call_count, 1) def test_get_controllers(self): driver = create_driver() SSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) SSHPool.do_exec = mock.Mock( side_effect=[NODE_DATAS, NODE_CPU_DATAS, NODE_VERSION]) controllers = driver.list_controllers(context) self.assertDictEqual(controllers[0], CONTROLLER_RESULT[0]) def test_get_disks(self): driver = create_driver() SSHPool.do_exec = mock.Mock(side_effect=[DISK_DATAS, DISK_I_DATAS]) disks = driver.list_disks(context) self.assertDictEqual(disks[0], DISK_RESULT[0]) def test_get_ports(self): driver = create_driver() SSHPool.do_exec = mock.Mock( side_effect=[PORT_DATAS, PORT_I_DATAS, PORT_PER_DATAS, PORT_ISCSI_DATAS, PORT_RCIP_DATAS, PORT_C_DATAS, PORT_RCIP_DATAS, PORT_RCIP_DATAS]) ports = driver.list_ports(context) self.assertDictEqual(ports[0], PORT_RESULT[0]) @mock.patch.object(RestHandler, 'get_pool_metrics') @mock.patch.object(SSHPool, 'do_exec') def test_get_perf_metrics(self, mock_exec, mock_pool): driver = create_driver() resource_metrics = { 'storagePool': [ 'iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime' ], 'volume': [ 'iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime', 'ioSize', 'readIoSize', 'writeIoSize', ], 'port': [ 'iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime' ], 'disk': [ 'iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime' ], 'filesystem': [ 'iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'readResponseTime', 'writeResponseTime', 'readIoSize', 'writeIoSize' ] } start_time = 1628472280000 end_time = 1628472900000 RestHandler.get_all_pools = mock.Mock(return_value=POOL_DATAS) mock_pool.return_value = POOL_METRICS_DATAS mock_exec.side_effect = [VOLUME_METRICS_DATAS, PORT_METRICS_DATAS, DISK_METRICS_DATAS] metrics = driver.collect_perf_metrics(context, '12345', resource_metrics, start_time, end_time) self.assertEqual(metrics[0], METRICS_RESULT[0]) self.assertEqual(metrics[14], METRICS_RESULT[1]) self.assertEqual(metrics[34], METRICS_RESULT[2]) self.assertEqual(metrics[48], METRICS_RESULT[3]) def test_get_capabilities(self): driver = create_driver() cap = driver.get_capabilities(context) self.assertIsNotNone(cap.get('resource_metrics')) self.assertIsNotNone(cap.get('resource_metrics').get('storagePool')) self.assertIsNotNone(cap.get('resource_metrics').get('volume')) self.assertIsNotNone(cap.get('resource_metrics').get('port')) self.assertIsNotNone(cap.get('resource_metrics').get('disk')) def test_get_storage_host_groups(self): driver = create_driver() SSHPool.do_exec = mock.Mock(side_effect=[HOST_GROUP_DATAS, HOST_ID_DATAS]) host_groups = driver.list_storage_host_groups(context) self.assertDictEqual(host_groups.get('storage_host_groups')[0], HOST_GROUP_RESULT[0]) def test_get_volume_groups(self): driver = 
create_driver() SSHPool.do_exec = mock.Mock(side_effect=[VOLUME_GROUP_DATAS, VOLUME_ID_DATAS]) volume_groups = driver.list_volume_groups(context) self.assertDictEqual(volume_groups.get('volume_groups')[0], VOLUME_GROUP_RESULT[0]) def test_storage_hosts(self): driver = create_driver() with mock.patch.object(RestHandler, 'get_resinfo_call', side_effect=HOST_DATAS): storage_hosts = driver.list_storage_hosts(context) self.assertDictEqual(storage_hosts[0], HOST_RESULT[0]) def test_get_storage_host_initiators(self): driver = create_driver() SSHPool.do_exec = mock.Mock(side_effect=[HOST_ID_DATAS]) initiators = driver.list_storage_host_initiators(context) self.assertDictEqual(initiators[0], INITIATOR_RESULT[0]) def test_get_masking_views(self): driver = create_driver() SSHPool.do_exec = mock.Mock( side_effect=[VIEW_DATAS, HOST_ID_DATAS, HOST_GROUP_DATAS, VOLUME_ID_DATAS, VOLUME_GROUP_DATAS]) views = driver.list_masking_views(context) self.assertDictEqual(views[0], VIEW_RESULT[0]) def test_get_port_groups(self): driver = create_driver() SSHPool.do_exec = mock.Mock(side_effect=[VIEW_DATAS]) port_groups = driver.list_port_groups(context) self.assertDictEqual(port_groups.get('port_groups')[0], PORT_GROUP_RESULT[0]) ================================================ FILE: delfin/tests/unit/drivers/hpe/hpe_msa/__init__.py ================================================ ================================================ FILE: delfin/tests/unit/drivers/hpe/hpe_msa/test_constans.py ================================================ LIST_CONTROLLERS = """ controller_a A 7CE539M591 4096 6144 GLS210R04-01 Gladden 1 OK Top controller_b B 7CE539M591 4096 6144 GLS210R04-01 Gladden 1 OK Bottom """ LIST_SYSTEM = """ msa2040 00C0FF26DCB0 Uninitialized Location HP MSA 2040 SAN MSA Storage OK """ LIST_VISION = """ GL210R004 """ LIST_PORTS = """ hostport_A1 A1 8Gb FC 207000c0ff26dcb0 N/A 4G,8G hostport_A2 A2 217000c0ff26dcb0 FC 8Gb N/A 4G,8G hostport_A3 A3 iSCSI N/A 0.0.0.0 00:C0:FF:35:BD:64 hostport_A4 A4 iSCSI Auto N/A 0.0.0.0 00:C0:FF:35:BD:65 hostport_B1 B1 247000c0ff26dcb0 FC 8Gb N/A 4G,8G hostport_B2 B2 FC 257000c0ff26dcb0 8Gb N/A 4G,8G hostport_B3 B3 iSCSI Auto N/A 0.0.0.0 00:C0:FF:35:BA:BC hostport_B4 B4 iSCSI Auto N/A 0.0.0.0 00:C0:FF:35:BA:BD """ LIST_POOLS = """ A 00c0ff26c4ea0000d980546101000000 1196.8GB 1196.8GB OK """ LIST_VOLUMES = """ V1 Vol0001 99.9GB 0B 99.9GB 195305472 OK 600C0FF00026C4EAFA80546101000000 base V2 Vol0002 0B 99.9GB 195305472 OK 600C0FF00026C4EA0A81546101000000 base """ LIST_DISKS = """ disk_01.01 1.1 0 6SL9CD560000N51404EF SEAGATE ST3600057SS SAS SAS 15 600.1GB OK dgA01 disk_01.02 1.2 6SL7X4RE0000B42601SF SEAGATE ST3600057SS SAS SAS 15 600.1GB OK dgA01 disk_01.03 1.3 6SL9QR5T0000N52120SK SEAGATE SAS ST3600057SS 15 600.1GB OK dgA01 disk_01.04 0 1.4 SAS 3SL0WT7G00009051YBTF SEAGATE ST3600057SS 15 600.1GB OK dgA01 """ LIST_ERROR = """ 2021-11-12 08:16:20 1636704980 557 A891 MSA 2040 SAN 00C0FF26C236 A 1 ERROR 2 An Enclosure Management Processor(EMP) Management Management """ LIST_HOST_INITIATORS = """ I2 FC-port1 No No HP-UX FC 6 21000024ff3dfed1 NOHOST HU 0 0 I1 FC-port2 No Yes HP-UX FC 6 10000090fa13870e 00c0ff26c2360000e2399f6101010000 H1 0 0 I0 FC-port3 No Yes HP-UX FC 6 10000090fa13870f 00c0ff26c2360000e2399f6101010000 H1 0 0 I6 rac01_01 No Yes Standard FC 6 500143801875548e 00c0ff26c4ea0000057f245b01010000 H4 0 0 I5 rac01_02 No Yes Standard FC 6 5001438012097ed6 00c0ff26c4ea0000057f245b01010000 H4 0 0 I3 rac02_01 No Yes Standard FC 6 50014380029ceb58 00c0ff26c4ea0000f77f245b01010000 H3 
0 0 I4 rac02_02 No No Standard FC 6 500143801209031c 00c0ff26c4ea0000f77f245b01010000 H3 0 0 I2 FC-port1 No No HP-UX FC 6 21000024ff3dfed1 NOHOST HU 0 0 """ LIST_HOST_GROUPS = """ HGU -ungrouped- UNGROUPEDHOSTS 0 HU -nohost- NOHOST 0 UNGROUPEDHOSTS HGU I2 FC-port1 No No HP-UX FC 6 21000024ff3dfed1 NOHOST HU 0 0 HG0 HostGroup1 00c0ff26c2360000223a9f6101010000 1 H1 Host1 00c0ff26c2360000e2399f6101010000 2 00c0ff26c2360000223a9f6101010000 HG0 I1 FC-port2 No Yes HP-UX FC 6 10000090fa13870e 00c0ff26c2360000e2399f6101010000 H1 0 0 I0 FC-port3 No Yes HP-UX FC 6 10000090fa13870f 00c0ff26c2360000e2399f6101010000 H1 0 0 HG2 rac 00c0ff26c4ea00008c81245b01010000 2 H4 rac01 00c0ff26c4ea0000057f245b01010000 2 00c0ff26c4ea00008c81245b01010000 HG2 I6 rac01_01 No Yes Standard FC 6 500143801875548e 00c0ff26c4ea0000057f245b01010000 H4 0 0 I5 rac01_02 No Yes Standard FC 6 5001438012097ed6 00c0ff26c4ea0000057f245b01010000 H4 0 0 H3 rac02 00c0ff26c4ea0000f77f245b01010000 2 00c0ff26c4ea00008c81245b01010000 HG2 I3 rac02_01 No Yes Standard FC 6 50014380029ceb58 00c0ff26c4ea0000f77f245b01010000 H3 0 0 I4 rac02_02 No No Standard FC 6 500143801209031c 00c0ff26c4ea0000f77f245b01010000 H3 0 0 HU -nohost- NOHOST 0 UNGROUPEDHOSTS HGU I2 FC-port1 No No HP-UX FC 6 21000024ff3dfed1 NOHOST HU 0 0 """ LIST_HOST = """ HU -nohost- NOHOST 0 UNGROUPEDHOSTS HGU H1 Host1 00c0ff26c2360000e2399f6101010000 2 00c0ff26c2360000223a9f6101010000 HG0 H4 rac01 00c0ff26c4ea0000057f245b01010000 2 00c0ff26c4ea00008c81245b01010000 HG2 H3 rac02 00c0ff26c4ea0000f77f245b01010000 2 00c0ff26c4ea00008c81245b01010000 HG2 HU -nohost- NOHOST 0 UNGROUPEDHOSTS HGU """ LIST_VOLUME_GROUPS = """ VG6 VGroup1 00c0ff26c4ea0000ab2b9f6101000000 Volume 3672 2 V0 A A Vol0001 100.9GB 197255168 100.9GB 197255168 0B 0 Virtual 1 A 1 A 1 00c0ff26c4ea0000fa80546101000000 write-back 1 standard 0 Adaptive -1 base 15 standard 0 Standard 0 No N/A 0 197255168 dmse 600C0FF00026C4EAFA80546101000000 0% 0 A 7 10.00 % 0 Enabled OK 0 00c0ff26c4ea0000ab2b9f6101000000 VG6 V1 A A Vol0002 99.9GB 195305472 99.9GB 195305472 0B 0 Virtual 1 A 1 A 1 00c0ff26c4ea00000a81546101000000 write-back 1 standard 0 Adaptive -1 base 15 standard 0 Standard 0 No N/A 0 195305472 dmse 600C0FF00026C4EA0A81546101000000 0% 0 A 7 10.00 % 0 Enabled OK 0 00c0ff26c4ea0000ab2b9f6101000000 VG6 """ LIST_MAPS_ALL = """ VG5 00c0ff26c4ea0000e22b9f6101000000 VGroup2.* VG5_I3 VG5 I3 1,2 read-write 3 50014380029ceb58 rac02_01 Standard Vol0003 00c0ff26c4ea000082537a6101000000 0 Vol0004 00c0ff26c4ea000085537a6101000000 2 V3 00c0ff26c4ea000085537a6101000000 Vol0004 V3_I0 V3 I0 3,4 0 read-write 3 10000090fa13870f FC-port3 HPUX V0 00c0ff26c4ea0000fa80546101000000 Vol0001 V0_I1 V0 I1 1,2 0 read-write 3 10000090fa13870e FC-port2 HPUX V1 00c0ff26c4ea00000a81546101000000 Vol0002 V1_H4 V1 H4 1,2 0 read-write 3 00c0ff26c4ea0000057f245b01010000 rac01.* Standard """ error_result = [ { 'alert_id': 'A891', 'alert_name': '557', 'category': 'Fault', 'description': 'Management', 'location': 'An Enclosure Management Processor(EMP)', 'match_key': 'd0317252aed04fd8b68e79d7eab08277', 'occur_time': 1636704980000, 'resource_type': '557', 'sequence_number': 'A891', 'severity': 'ERROR', 'type': 'EquipmentAlarm' } ] volume_result = [ { 'name': 'Vol0001', 'storage_id': 'kkk', 'description': 'Vol0001', 'status': 'normal', 'native_volume_id': 'V1', 'native_storage_pool_id': '00c0ff26c4ea0000d980546101000000', 'wwn': '600C0FF00026C4EAFA80546101000000', 'type': 'base', 'total_capacity': 107266808217, 'free_capacit': 107266808217, 'used_capacity': 0, 
'blocks': 195305472, 'compressed': True, 'deduplicated': True }, { 'name': 'Vol0002', 'storage_id': 'kkk', 'description': 'Vol0002', 'status': 'normal', 'native_volume_id': 'V2', 'native_storage_pool_id': '00c0ff26c4ea0000d980546101000000', 'wwn': '600C0FF00026C4EA0A81546101000000', 'type': 'base', 'total_capacity': 107266808217, 'free_capacit': 107266808217, 'used_capacity': 0, 'blocks': 195305472, 'compressed': True, 'deduplicated': True } ] pools_result = [ { 'name': 'A', 'storage_id': 'kkk', 'native_storage_pool_id': '00c0ff26c4ea0000d980546101000000', 'status': 'normal', 'storage_type': 'block', 'total_capacity': 1285054214963, 'subscribed_capacity': 390610944, 'used_capacity': 214533616434, 'free_capacity': 1070520598529 } ] ports_result = [ { 'native_port_id': 'hostport_A1', 'name': 'A1', 'type': 'fc', 'connection_status': 'disconnected', 'health_status': 'abnormal', 'location': 'A1_FC', 'storage_id': 'kkk', 'speed': 8589934592.0, 'max_speed': 8589934592.0, 'mac_address': None, 'ipv4': None, 'wwn': '207000c0ff26dcb0' }, { 'native_port_id': 'hostport_A2', 'name': 'A2', 'type': 'fc', 'connection_status': 'disconnected', 'health_status': 'abnormal', 'location': 'A2_FC', 'storage_id': 'kkk', 'speed': 8589934592.0, 'max_speed': 8589934592.0, 'mac_address': None, 'ipv4': None, 'wwn': '217000c0ff26dcb0' }, { 'native_port_id': 'hostport_A3', 'name': 'A3', 'type': 'eth', 'connection_status': 'disconnected', 'health_status': 'abnormal', 'location': 'A3_ISCSI', 'storage_id': 'kkk', 'speed': 0, 'max_speed': 0, 'mac_address': '00:C0:FF:35:BD:64', 'ipv4': '0.0.0.0', 'wwn': None }, { 'native_port_id': 'hostport_A4', 'name': 'A4', 'type': 'eth', 'connection_status': 'disconnected', 'health_status': 'abnormal', 'location': 'A4_ISCSI', 'storage_id': 'kkk', 'speed': 0, 'max_speed': 0, 'mac_address': '00:C0:FF:35:BD:65', 'ipv4': '0.0.0.0', 'wwn': None }, { 'native_port_id': 'hostport_B1', 'name': 'B1', 'type': 'fc', 'connection_status': 'disconnected', 'health_status': 'abnormal', 'location': 'B1_FC', 'storage_id': 'kkk', 'speed': 8589934592.0, 'max_speed': 8589934592.0, 'mac_address': None, 'ipv4': None, 'wwn': '247000c0ff26dcb0' }, { 'native_port_id': 'hostport_B2', 'name': 'B2', 'type': 'fc', 'connection_status': 'disconnected', 'health_status': 'abnormal', 'location': 'B2_FC', 'storage_id': 'kkk', 'speed': 8589934592.0, 'max_speed': 8589934592.0, 'mac_address': None, 'ipv4': None, 'wwn': '257000c0ff26dcb0' }, { 'native_port_id': 'hostport_B3', 'name': 'B3', 'type': 'eth', 'connection_status': 'disconnected', 'health_status': 'abnormal', 'location': 'B3_ISCSI', 'storage_id': 'kkk', 'speed': 0, 'max_speed': 0, 'mac_address': '00:C0:FF:35:BA:BC', 'ipv4': '0.0.0.0', 'wwn': None }, { 'native_port_id': 'hostport_B4', 'name': 'B4', 'type': 'eth', 'connection_status': 'disconnected', 'health_status': 'abnormal', 'location': 'B4_ISCSI', 'storage_id': 'kkk', 'speed': 0, 'max_speed': 0, 'mac_address': '00:C0:FF:35:BA:BD', 'ipv4': '0.0.0.0', 'wwn': None }] disks_result = [ { 'native_disk_id': '1.1', 'name': '1.1', 'physical_type': 'sas', 'status': 'normal', 'storage_id': 'kkk', 'native_disk_group_id': 'dgA01', 'serial_number': '6SL9CD560000N51404EF', 'manufacturer': 'SEAGATE', 'model': 'ST3600057SS', 'speed': 15000, 'capacity': 644352468582, 'health_score': 'normal' }, { 'native_disk_id': '1.2', 'name': '1.2', 'physical_type': 'sas', 'status': 'normal', 'storage_id': 'kkk', 'native_disk_group_id': 'dgA01', 'serial_number': '6SL7X4RE0000B42601SF', 'manufacturer': 'SEAGATE', 'model': 'ST3600057SS', 'speed': 
15000, 'capacity': 644352468582, 'health_score': 'normal' }, { 'native_disk_id': '1.3', 'name': '1.3', 'physical_type': 'sas', 'status': 'normal', 'storage_id': 'kkk', 'native_disk_group_id': 'dgA01', 'serial_number': '6SL9QR5T0000N52120SK', 'manufacturer': 'SEAGATE', 'model': 'ST3600057SS', 'speed': 15000, 'capacity': 644352468582, 'health_score': 'normal' }, { 'native_disk_id': '1.4', 'name': '1.4', 'physical_type': 'sas', 'status': 'normal', 'storage_id': 'kkk', 'native_disk_group_id': 'dgA01', 'serial_number': '3SL0WT7G00009051YBTF', 'manufacturer': 'SEAGATE', 'model': 'ST3600057SS', 'speed': 15000, 'capacity': 644352468582, 'health_score': 'normal' } ] system_info = { 'name': 'msa2040', 'vendor': 'HPE', 'model': 'MSA 2040 SAN', 'status': 'normal', 'serial_number': '00C0FF26DCB0', 'firmware_version': 'GL210R004', 'location': 'Uninitialized Location', 'raw_capacity': 2577409874328, 'total_capacity': 1285054214963, 'used_capacity': 214533616434, 'free_capacity': 1070520598529 } controller_result = [ { 'native_controller_id': 'A', 'name': 'controller_a', 'storage_id': 'kkk', 'status': 'normal', 'location': 'Top', 'soft_version': 'GLS210R04-01', 'cpu_info': 'Gladden', 'cpu_count': 1, 'memory_size': 6442450944 }, { 'native_controller_id': 'B', 'name': 'controller_b', 'storage_id': 'kkk', 'status': 'normal', 'location': 'Bottom', 'soft_version': 'GLS210R04-01', 'cpu_info': 'Gladden', 'cpu_count': 1, 'memory_size': 6442450944 } ] list_storage_host_initiators = [ { 'name': 'FC-port1', 'type': 'fc', 'alias': 'I2', 'storage_id': 'kkk', 'native_storage_host_initiator_id': 'I2', 'wwn': '21000024ff3dfed1', 'status': 'online', 'native_storage_host_id': 'NOHOST' }, { 'name': 'FC-port2', 'type': 'fc', 'alias': 'I1', 'storage_id': 'kkk', 'native_storage_host_initiator_id': 'I1', 'wwn': '10000090fa13870e', 'status': 'online', 'native_storage_host_id': '00c0ff26c2360000e2399f6101010000' }, { 'name': 'FC-port3', 'type': 'fc', 'alias': 'I0', 'storage_id': 'kkk', 'native_storage_host_initiator_id': 'I0', 'wwn': '10000090fa13870f', 'status': 'online', 'native_storage_host_id': '00c0ff26c2360000e2399f6101010000' }, { 'name': 'rac01_01', 'type': 'fc', 'alias': 'I6', 'storage_id': 'kkk', 'native_storage_host_initiator_id': 'I6', 'wwn': '500143801875548e', 'status': 'online', 'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000' }, { 'name': 'rac01_02', 'type': 'fc', 'alias': 'I5', 'storage_id': 'kkk', 'native_storage_host_initiator_id': 'I5', 'wwn': '5001438012097ed6', 'status': 'online', 'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000' }, { 'name': 'rac02_01', 'type': 'fc', 'alias': 'I3', 'storage_id': 'kkk', 'native_storage_host_initiator_id': 'I3', 'wwn': '50014380029ceb58', 'status': 'online', 'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000' }, { 'name': 'rac02_02', 'type': 'fc', 'alias': 'I4', 'storage_id': 'kkk', 'native_storage_host_initiator_id': 'I4', 'wwn': '500143801209031c', 'status': 'online', 'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000' }, { 'name': 'FC-port1', 'type': 'fc', 'alias': 'I2', 'storage_id': 'kkk', 'native_storage_host_initiator_id': 'I2', 'wwn': '21000024ff3dfed1', 'status': 'online', 'native_storage_host_id': 'NOHOST' } ] list_storage_hosts = [ { 'name': 'Host1', 'description': 'H1', 'storage_id': 'kkk', 'native_storage_host_id': '00c0ff26c2360000e2399f6101010000', 'os_type': 'HP-UX', 'status': 'normal' }, { 'name': 'rac01', 'description': 'H4', 'storage_id': 'kkk', 'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000', 'os_type': 
'HP-UX', 'status': 'normal' }, { 'name': 'rac02', 'description': 'H3', 'storage_id': 'kkk', 'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000', 'os_type': 'HP-UX', 'status': 'normal' } ] list_storage_host_groups = { 'storage_host_groups': [ { 'name': 'HostGroup1', 'description': 'HG0', 'storage_id': 'kkk', 'native_storage_host_group_id': '00c0ff26c2360000223a9f6101010000', 'storage_hosts': '00c0ff26c2360000e2399f6101010000' }, { 'name': 'rac', 'description': 'HG2', 'storage_id': 'kkk', 'native_storage_host_group_id': '00c0ff26c4ea00008c81245b01010000', 'storage_hosts': '00c0ff26c4ea0000057f245b01010000,' '00c0ff26c4ea0000f77f245b01010000' } ], 'storage_host_grp_host_rels': [ {'storage_id': 'kkk', 'native_storage_host_group_id': '00c0ff26c2360000223a9f6101010000', 'native_storage_host_id': '00c0ff26c2360000e2399f6101010000' }, { 'storage_id': 'kkk', 'native_storage_host_group_id': '00c0ff26c4ea00008c81245b01010000', 'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000' }, { 'storage_id': 'kkk', 'native_storage_host_group_id': '00c0ff26c4ea00008c81245b01010000', 'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000' } ] } list_volume_groups = { 'volume_groups': [ { 'name': 'VGroup1', 'description': 'VG6', 'storage_id': 'kkk', 'native_volume_group_id': 'VG6', 'volumes': 'V0,V1' } ], 'vol_grp_vol_rels': [ { 'storage_id': 'kkk', 'native_volume_group_id': 'VG6', 'native_volume_id': 'V0' }, { 'storage_id': 'kkk', 'native_volume_group_id': 'VG6', 'native_volume_id': 'V1' } ] } list_masking_views = [ { 'name': 'FC-port3', 'description': 'FC-port3', 'storage_id': 'kkk', 'native_masking_view_id': 'V3_I0V3', 'native_port_group_id': 'port_group_A3B3A4B4', 'native_volume_id': 'V3', 'native_storage_host_id': '00c0ff26c2360000e2399f6101010000' }, { 'name': 'FC-port2', 'description': 'FC-port2', 'storage_id': 'kkk', 'native_masking_view_id': 'V0_I1V0', 'native_port_group_id': 'port_group_A1B1A2B2', 'native_volume_id': 'V0', 'native_storage_host_id': '00c0ff26c2360000e2399f6101010000' }, { 'name': 'rac01.*', 'description': 'rac01.*', 'storage_id': 'kkk', 'native_masking_view_id': 'V1_H4V1', 'native_port_group_id': 'port_group_A1B1A2B2', 'native_volume_id': 'V1', 'native_storage_host_id': '00c0ff26c4ea0000057f245b01010000' }, { 'name': 'rac02_01', 'description': 'rac02_01', 'storage_id': 'kkk', 'native_masking_view_id': 'VG5_I3VG5', 'native_port_group_id': 'port_group_A1B1A2B2', 'native_volume_group_id': 'VG5', 'native_storage_host_id': '00c0ff26c4ea0000f77f245b01010000' } ] ================================================ FILE: delfin/tests/unit/drivers/hpe/hpe_msa/test_hpe_msastor.py ================================================ import sys import paramiko from delfin import context from unittest import TestCase, mock from delfin.tests.unit.drivers.hpe.hpe_msa import test_constans from delfin.drivers.utils.ssh_client import SSHPool from delfin.drivers.hpe.hpe_msa.ssh_handler import SSHHandler from delfin.drivers.hpe.hpe_msa.hpe_msastor import HpeMsaStorDriver sys.modules['delfin.cryptor'] = mock.Mock() ACCESS_INFO = { "storage_id": "kkk", "ssh": { "host": "110.143.132.231", "port": 22, "username": "user", "password": "pass", "pub_key": "ddddddddddddddddddddddddd" } } class TestHpeMsaStorageDriver(TestCase): @mock.patch.object(SSHPool, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_list_ports(self, mock_ssh_get, mock_control): mock_ssh_get.return_value = {paramiko.SSHClient()} mock_control.side_effect = [test_constans.LIST_PORTS] ports = 
HpeMsaStorDriver(**ACCESS_INFO).list_ports(context) self.assertEqual(ports, test_constans.ports_result) @mock.patch.object(SSHPool, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_list_disks(self, mock_ssh_get, mock_control): mock_ssh_get.return_value = {paramiko.SSHClient()} mock_control.side_effect = [test_constans.LIST_DISKS] disks = HpeMsaStorDriver(**ACCESS_INFO).list_disks(context) self.assertEqual(disks, test_constans.disks_result) @mock.patch.object(SSHPool, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_list_controllers(self, mock_ssh_get, mock_control): mock_ssh_get.return_value = {paramiko.SSHClient()} mock_control.side_effect = [test_constans.LIST_CONTROLLERS] controller = HpeMsaStorDriver(**ACCESS_INFO).\ list_controllers(context) self.assertEqual(controller, test_constans.controller_result) @mock.patch.object(SSHPool, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_list_volumes(self, mock_ssh_get, mock_control): mock_ssh_get.return_value = {paramiko.SSHClient()} mock_control.side_effect = [test_constans.LIST_VOLUMES, test_constans.LIST_POOLS] volumes = HpeMsaStorDriver(**ACCESS_INFO).list_volumes(context) self.assertEqual(volumes, test_constans.volume_result) @mock.patch.object(SSHPool, 'do_exec') @mock.patch.object(SSHPool, 'get') @mock.patch.object(SSHHandler, 'list_storage_pools') @mock.patch.object(SSHHandler, 'list_storage_disks') @mock.patch.object(SSHHandler, 'list_storage_volume') def test_list_storage(self, mock_system, mock_ssh_get, mock_pools, mock_disks, mock_volume): mock_volume.side_effect = [test_constans.LIST_SYSTEM, test_constans.LIST_VISION] mock_disks.return_value = {paramiko.SSHClient()} mock_pools.side_effect = [test_constans.pools_result] mock_ssh_get.side_effect = [test_constans.disks_result] mock_system.side_effect = [test_constans.volume_result] system = HpeMsaStorDriver(**ACCESS_INFO).get_storage(context) self.assertEqual(system, test_constans.system_info) @mock.patch.object(SSHPool, 'do_exec') @mock.patch.object(SSHPool, 'get') @mock.patch.object(SSHHandler, 'list_storage_volume') def test_list_storage_pools(self, mock_ssh_get, mock_control, mock_volume): mock_ssh_get.return_value = test_constans.volume_result mock_control.side_effect = {paramiko.SSHClient()} mock_volume.side_effect = [test_constans.LIST_POOLS] pools = HpeMsaStorDriver(**ACCESS_INFO).list_storage_pools(context) self.assertEqual(pools, test_constans.pools_result) @mock.patch.object(SSHPool, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_list_alerts(self, mock_ssh_get, mock_control): query_para = None mock_ssh_get.return_value = {paramiko.SSHClient()} mock_control.side_effect = [test_constans.LIST_ERROR] alerts = HpeMsaStorDriver(**ACCESS_INFO).list_alerts(query_para) self.assertEqual(alerts, test_constans.error_result) @mock.patch.object(SSHPool, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_list_storage_host_initiators(self, mock_ssh_get, mock_control): mock_ssh_get.return_value = {paramiko.SSHClient()} mock_control.side_effect = [test_constans.LIST_HOST_INITIATORS] list_storage_host_initiators = HpeMsaStorDriver(**ACCESS_INFO)\ .list_storage_host_initiators(context) self.assertEqual(list_storage_host_initiators[0], test_constans .list_storage_host_initiators[0]) @mock.patch.object(SSHPool, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_list_storage_hosts(self, mock_ssh_get, mock_control): mock_ssh_get.return_value = {paramiko.SSHClient()} mock_control.side_effect = [test_constans.LIST_HOST] list_storage_hosts = 
HpeMsaStorDriver(**ACCESS_INFO) \
            .list_storage_hosts(context)
        self.assertEqual(list_storage_hosts,
                         test_constans.list_storage_hosts)

    @mock.patch.object(SSHPool, 'do_exec')
    @mock.patch.object(SSHPool, 'get')
    def test_list_storage_host_groups(self, mock_ssh_get, mock_control):
        mock_ssh_get.return_value = {paramiko.SSHClient()}
        mock_control.side_effect = [test_constans.LIST_HOST_GROUPS]
        list_storage_host_groups = HpeMsaStorDriver(**ACCESS_INFO) \
            .list_storage_host_groups(context)
        self.assertEqual(list_storage_host_groups,
                         test_constans.list_storage_host_groups)

    @mock.patch.object(SSHPool, 'do_exec')
    @mock.patch.object(SSHPool, 'get')
    def test_list_volume_groups(self, mock_ssh_get, mock_control):
        mock_ssh_get.return_value = {paramiko.SSHClient()}
        mock_control.side_effect = [test_constans.LIST_VOLUME_GROUPS]
        list_volume_groups = HpeMsaStorDriver(**ACCESS_INFO) \
            .list_volume_groups(context)
        self.assertEqual(list_volume_groups,
                         test_constans.list_volume_groups)

    @mock.patch.object(SSHPool, 'do_exec')
    @mock.patch.object(SSHPool, 'get')
    @mock.patch.object(SSHHandler, 'list_storage_ports')
    @mock.patch.object(SSHHandler, 'list_storage_hosts')
    @mock.patch.object(SSHHandler, 'list_storage_host_initiators')
    def test_list_masking_view(self, mock_ssh_get, mock_control,
                               mock_port, mock_hosts, mock_initiators):
        mock_ssh_get.side_effect = [
            test_constans.list_storage_host_initiators]
        mock_control.side_effect = [test_constans.list_storage_hosts]
        mock_port.side_effect = [test_constans.ports_result]
        mock_hosts.return_value = {paramiko.SSHClient()}
        mock_initiators.return_value = test_constans.LIST_MAPS_ALL
        list_masking_views = HpeMsaStorDriver(**ACCESS_INFO) \
            .list_masking_views(context)
        self.assertEqual(list_masking_views,
                         test_constans.list_masking_views)

================================================
FILE: delfin/tests/unit/drivers/huawei/__init__.py
================================================


================================================
FILE: delfin/tests/unit/drivers/huawei/oceanstor/__init__.py
================================================


================================================
FILE: delfin/tests/unit/drivers/huawei/oceanstor/test_alert_handler.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
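# The tests below feed the handler a trap payload keyed by Huawei's
# hwIsmReportingAlarm OIDs and check the normalized delfin alert model.
# From the expected models in this file, the OID suffixes map roughly as:
# ...1.7.0 -> alert_id, ...1.4.0 -> alert_name, ...1.9.0 -> sequence_number,
# ...1.10.0 -> description, ...1.3.0 -> recovery_advice, and ...1.1.0 plus
# ...1.2.0 are combined into location. A minimal illustrative sketch of that
# mapping (not the driver's actual code):
#
#     OID_PREFIX = '1.3.6.1.4.1.2011.2.91.10.3.1.1.'
#
#     def map_trap(trap):
#         return {
#             'alert_id': trap[OID_PREFIX + '7.0'],
#             'alert_name': trap[OID_PREFIX + '4.0'],
#             'sequence_number': trap[OID_PREFIX + '9.0'],
#             'description': trap[OID_PREFIX + '10.0'],
#             'recovery_advice': trap[OID_PREFIX + '3.0'],
#             'location': 'Node code=' + trap[OID_PREFIX + '1.0'] +
#                         ',' + trap[OID_PREFIX + '2.0'],
#         }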
import unittest

from oslo_utils import importutils

from delfin import exception
from delfin.common import constants


class AlertHandlerTestCase(unittest.TestCase):
    ALERT_HANDLER_CLASS = 'delfin.drivers.huawei.oceanstor.alert_handler' \
                          '.AlertHandler'

    def _get_alert_handler(self):
        alert_handler_class = importutils.import_class(
            self.ALERT_HANDLER_CLASS)
        alert_handler = alert_handler_class()
        return alert_handler

    def _get_fake_alert_info(self):
        alert_info = {
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.2.0': 'location=location1',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.4.0': 'Trap Test Alarm',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.5.0': '2',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.6.0': '1',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.7.0': '4294967294',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.9.0': '4294967295',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.10.0': 'This is just for'
                                                   ' testing.Please '
                                                   'ignore it',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.11.0': '1',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.3.0': 'Sample advice',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.1.0': 'Array',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.8.0': '2020-6-25,1:42:26.0'
        }
        return alert_info

    def _get_fake_incomplete_alert_info(self):
        # hwIsmReportingAlarmFaultCategory is missing here
        alert_info = {
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.2.0': 'location=location1',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.4.0': 'Trap Test Alarm',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.5.0': '2',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.6.0': '1',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.7.0': '4294967294',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.9.0': '4294967295',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.10.0': 'This is just '
                                                   'for testing.'
                                                   'Please '
                                                   'ignore it',
            '1.3.6.1.4.1.2011.2.91.10.3.1.1.8': '2020-6-25,1:42:26.0'
        }
        return alert_info

    def _get_fake_queried_alert(self):
        alert_info = [{
            'eventID': 1234,
            'name': 'sample-event',
            'level': 2,
            'eventType': 0,
            'sequence': '1234',
            'startTime': 13200000,
            'description': 'This is just for testing.Please ignore it',
            'suggestion': 'Sample advice',
            'location': 'location1'
        }]
        return alert_info

    def test_parse_alert_with_all_necessary_info(self):
        """ Success flow with all necessary parameters"""
        alert_handler_inst = self._get_alert_handler()
        alert = self._get_fake_alert_info()
        expected_alert_model = {
            'alert_id': alert['1.3.6.1.4.1.2011.2.91.10.3.1.1.7.0'],
            'alert_name': alert['1.3.6.1.4.1.2011.2.91.10.3.1.1.4.0'],
            'severity': constants.Severity.CRITICAL,
            'category': constants.Category.FAULT,
            'type': constants.EventType.EQUIPMENT_ALARM,
            'sequence_number': alert['1.3.6.1.4.1.2011.2.91.10.3.1.1.9.0'],
            'description': alert['1.3.6.1.4.1.2011.2.91.10.3.1.1.10.0'],
            'recovery_advice': alert['1.3.6.1.4.1.2011.2.91.10.3.1.1.3.0'],
            'resource_type': constants.DEFAULT_RESOURCE_TYPE,
            'location': 'Node code='
                        + alert['1.3.6.1.4.1.2011.2.91.10.3.1.1.1.0']
                        + ',' + alert['1.3.6.1.4.1.2011.2.91.10.3.1.1.2.0']
        }
        context = {}
        alert_model = alert_handler_inst.parse_alert(context, alert)

        # Equating occur_time so that complete model can be validated
        expected_alert_model['occur_time'] = alert_model['occur_time']

        # Verify that all other fields are matching
        self.assertDictEqual(expected_alert_model, alert_model)

    def test_parse_alert_without_mandatory_info(self):
        """ Error flow with some mandatory parameters missing"""
        alert_handler_inst = self._get_alert_handler()
        context = {}
        alert = self._get_fake_incomplete_alert_info()
        self.assertRaisesRegex(exception.InvalidInput,
                               "Mandatory information "
                               "hwIsmReportingAlarmNodeCode missing in alert "
                               "message.",
                               alert_handler_inst.parse_alert, context, alert)
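    # parse_queried_alerts in the tests below filters alerts against the
    # optional 'begin_time'/'end_time' bounds in query_para, while the
    # normalized model reports 'occur_time' as startTime * 1000 (ms).
    # Judging from the values used here (startTime 13200000 passes a
    # 13100000..13300000 window and is dropped by a 13300000..13400000 one),
    # the bounds bracket the backend's raw startTime value; an illustrative
    # filter, not the driver's exact code:
    #
    #     if query_para is not None:
    #         if query_para.get('begin_time') is not None and \
    #                 alert['startTime'] < query_para['begin_time']:
    #             continue  # before the queried window: skip
    #         if query_para.get('end_time') is not None and \
    #                 alert['startTime'] > query_para['end_time']:
    #             continue  # after the queried window: skip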
    def test_parse_queried_alerts_inside_range(self):
        """ Success flow with all necessary parameters"""
        alert_handler_inst = self._get_alert_handler()
        alert = self._get_fake_queried_alert()
        expected_alert_model = [{
            'alert_id': alert[0]['eventID'],
            'alert_name': alert[0]['name'],
            'severity': constants.Severity.INFORMATIONAL,
            'category': constants.Category.EVENT,
            'type': constants.EventType.NOT_SPECIFIED,
            'sequence_number': alert[0]['sequence'],
            'description': alert[0]['description'],
            'recovery_advice': alert[0]['suggestion'],
            'resource_type': constants.DEFAULT_RESOURCE_TYPE,
            'location': alert[0]['location'],
            'occur_time': alert[0]['startTime'] * 1000
        }]

        # With both valid begin_time and end_time
        query_para = {'begin_time': 13100000, 'end_time': 13300000}
        alert_model = alert_handler_inst.parse_queried_alerts(alert,
                                                              query_para)
        # Verify that all other fields are matching
        self.assertDictEqual(expected_alert_model[0], alert_model[0])

        # With only valid begin_time
        query_para = {'begin_time': 13100000}
        alert_model = alert_handler_inst.parse_queried_alerts(alert,
                                                              query_para)
        # Verify that all other fields are matching
        self.assertDictEqual(expected_alert_model[0], alert_model[0])

        # With only valid end_time
        query_para = {'end_time': 13300000}
        alert_model = alert_handler_inst.parse_queried_alerts(alert,
                                                              query_para)
        # Verify that all other fields are matching
        self.assertDictEqual(expected_alert_model[0], alert_model[0])

    def test_parse_queried_alerts_outside_range(self):
        """ Success flow with all necessary parameters"""
        alert_handler_inst = self._get_alert_handler()
        alert = self._get_fake_queried_alert()
        query_para = {'begin_time': 13300000, 'end_time': 13400000}
        alert_model = alert_handler_inst.parse_queried_alerts(alert,
                                                              query_para)
        # Verify that when input alert is out of begin and end time,
        # it is skipped
        self.assertEqual(len(alert_model), 0)

================================================
FILE: delfin/tests/unit/drivers/huawei/oceanstor/test_oceanstor.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
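# The OceanStor driver logs in during construction, so every test in this
# file builds the driver with requests.Session.post patched to return a
# canned login payload (deviceid / iBaseToken, error code 0). A minimal
# sketch of the pattern used by create_driver() below:
#
#     m = mock.MagicMock()
#     m.json.return_value = {'data': {'deviceid': 'x', 'iBaseToken': 't',
#                                     'accountstate': 1},
#                            'error': {'code': 0, 'description': '0'}}
#     with mock.patch.object(Session, 'post', return_value=m):
#         driver = OceanStorDriver(**ACCESS_INFO)
#
# A non-zero error code in that payload makes construction raise
# (see test_init).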
from unittest import TestCase, mock from delfin import exception from delfin import context from delfin.common import config # noqa from delfin.drivers.huawei.oceanstor.oceanstor import OceanStorDriver, consts from delfin.drivers.huawei.oceanstor.rest_client import RestClient from requests import Session class Request: def __init__(self): self.environ = {'delfin.context': context.RequestContext()} pass ACCESS_INFO = { "storage_id": "12345", "vendor": "dell_emc", "model": "vmax", "rest": { "host": "10.0.0.1", "port": "8443", "username": "user", "password": "cGFzc3dvcmQ=", }, "extra_attributes": { "array_id": "00112233" } } def create_driver(): kwargs = ACCESS_INFO m = mock.MagicMock() with mock.patch.object(Session, 'post', return_value=m): m.raise_for_status.return_value = None m.json.return_value = { 'data': { 'deviceid': '123ABC456', 'iBaseToken': 'FFFF0000', 'accountstate': 1 }, 'error': { 'code': 0, 'description': '0' } } return OceanStorDriver(**kwargs) class TestOceanStorStorageDriver(TestCase): def test_init(self): driver = create_driver() self.assertEqual(driver.storage_id, "12345") self.assertEqual(driver.sector_size, consts.SECTORS_SIZE) self.assertEqual(driver.client.device_id, '123ABC456') m = mock.MagicMock() with mock.patch.object(Session, 'post', return_value=m): m.raise_for_status.return_value = None m.json.return_value = { 'data': { 'deviceid': '123ABC456', 'iBaseToken': 'FFFF0000', 'accountstate': 1 }, 'error': { 'code': 123, 'description': '0' } } kwargs = ACCESS_INFO with self.assertRaises(Exception) as exc: OceanStorDriver(**kwargs) self.assertIn('The credentials are invalid', str(exc.exception)) def test_get_storage(self): driver = create_driver() expected = { 'name': 'OceanStor', 'vendor': 'Huawei', 'description': 'Huawei OceanStor Storage', 'model': 'OceanStor_1', 'status': 'normal', 'serial_number': '012345', 'firmware_version': '1000', 'location': 'Location1', 'total_capacity': 51200, 'used_capacity': 38400, 'free_capacity': 20480, 'raw_capacity': 76800 } ret = [ # Storage 1 { 'data': { 'RUNNINGSTATUS': '1', 'SECTORSIZE': '512', 'TOTALCAPACITY': '100', 'USEDCAPACITY': '75', 'MEMBERDISKSCAPACITY': '150', 'userFreeCapacity': '40', 'NAME': 'OceanStor_1', 'ID': '012345', 'LOCATION': 'Location1' }, 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'SOFTVER': '1000', }], 'error': { 'code': 0, 'description': '0' } } ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): storage = driver.get_storage(context) self.assertDictEqual(storage, expected) def test_list_storage_pools(self): driver = create_driver() expected = [ { 'name': 'OceanStor_1', 'storage_id': '12345', 'native_storage_pool_id': '012345', 'description': 'Huawei OceanStor Pool', 'status': 'normal', 'storage_type': 'block', 'total_capacity': 51200, 'used_capacity': 38400, 'free_capacity': 20480 }, { 'name': 'OceanStor_1', 'storage_id': '12345', 'native_storage_pool_id': '012345', 'description': 'Huawei OceanStor Pool', 'status': 'offline', 'storage_type': 'file', 'total_capacity': 51200, 'used_capacity': 38400, 'free_capacity': 20480 } ] ret = [ { 'data': [ { 'RUNNINGSTATUS': '27', 'USAGETYPE': '1', 'USERTOTALCAPACITY': '100', 'USERCONSUMEDCAPACITY': '75', 'USERFREECAPACITY': '40', 'NAME': 'OceanStor_1', 'ID': '012345', 'LOCATION': 'Location1' }, { 'RUNNINGSTATUS': '28', 'USAGETYPE': '2', 'USERTOTALCAPACITY': '100', 'USERCONSUMEDCAPACITY': '75', 'USERFREECAPACITY': '40', 'NAME': 'OceanStor_1', 'ID': '012345', 'LOCATION': 'Location1' } ], 'error': { 'code': 0, 'description': '0' } }, { 
'data': [{ 'SOFTVER': '1000', }], 'error': { 'code': 0, 'description': '0' } } ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): pools = driver.list_storage_pools(context) self.assertDictEqual(pools[0], expected[0]) self.assertDictEqual(pools[1], expected[1]) with mock.patch.object(RestClient, 'get_all_pools', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_storage_pools(context) self.assertIn('An unknown exception occurred', str(exc.exception)) def test_list_volumes(self): driver = create_driver() expected = [ { 'name': 'Volume_1', 'storage_id': '12345', 'description': 'Huawei OceanStor volume', 'status': 'available', 'native_volume_id': '0001', 'native_storage_pool_id': '012345', 'wwn': 'wwn12345', 'type': 'thin', 'total_capacity': 51200, 'used_capacity': 38400, 'free_capacity': None, 'compressed': False, 'deduplicated': False }, { 'name': 'Volume_1', 'storage_id': '12345', 'description': 'Huawei OceanStor volume', 'status': 'error', 'native_volume_id': '0001', 'native_storage_pool_id': '012345', 'wwn': 'wwn12345', 'type': 'thick', 'total_capacity': 51200, 'used_capacity': 38400, 'free_capacity': None, 'compressed': True, 'deduplicated': True } ] ret = [ { 'data': [ { 'RUNNINGSTATUS': '27', 'USAGETYPE': '1', 'CAPACITY': '100', 'ALLOCCAPACITY': '75', 'WWN': 'wwn12345', 'NAME': 'Volume_1', 'ID': '0001', 'LOCATION': 'Location1', 'PARENTNAME': 'OceanStor_1', 'ENABLECOMPRESSION': 'false', 'ENABLEDEDUP': 'false', 'ALLOCTYPE': '1', 'SECTORSIZE': '512', }, { 'RUNNINGSTATUS': '28', 'USAGETYPE': '1', 'CAPACITY': '100', 'ALLOCCAPACITY': '75', 'WWN': 'wwn12345', 'NAME': 'Volume_1', 'ID': '0001', 'LOCATION': 'Location1', 'PARENTNAME': 'OceanStor_1', 'ENABLECOMPRESSION': 'true', 'ENABLEDEDUP': 'true', 'ALLOCTYPE': '0', 'SECTORSIZE': '512', } ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'NAME': 'OceanStor_1', 'ID': '012345' }], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'SOFTVER': '1000', }], 'error': { 'code': 0, 'description': '0' } } ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): volumes = driver.list_volumes(context) self.assertDictEqual(volumes[0], expected[0]) self.assertDictEqual(volumes[1], expected[1]) with mock.patch.object(RestClient, 'get_all_volumes', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_volumes(context) self.assertIn('An unknown exception occurred', str(exc.exception)) def test_list_ports(self): driver = create_driver() expected = [ { 'name': 'TEST_FC_PORT', 'storage_id': '12345', 'connection_status': 'disconnected', 'health_status': 'unknown', 'location': 'Location1', 'logical_type': 'service', 'max_speed': '16000', 'native_port_id': '012345', 'native_parent_id': '0B.0', 'wwn': 'WWN_123000', 'type': 'fc', 'speed': None, 'mac_address': None, 'ipv4': None, 'ipv4_mask': None, 'ipv6': None, 'ipv6_mask': None, }, { 'name': 'TEST_FCOE_PORT', 'storage_id': '12345', 'connection_status': 'disconnected', 'health_status': 'unknown', 'location': 'Location2', 'logical_type': 'service', 'max_speed': '12000', 'native_port_id': '22222', 'native_parent_id': '0B.2', 'wwn': '2210', 'type': 'fcoe', 'speed': None, 'mac_address': None, 'ipv4': None, 'ipv4_mask': None, 'ipv6': None, 'ipv6_mask': None, }, { 'name': 'TEST_ETH_PORT', 'storage_id': '12345', 'connection_status': 'disconnected', 'health_status': 'unknown', 'location': 'Location3', 'logical_type': 'service', 'max_speed': '1000', 'native_port_id': '11111', 'native_parent_id': '0B.0', 
'wwn': None, 'type': 'eth', 'speed': '-1', 'mac_address': 'MAC_1:ff:00', 'ipv4': None, 'ipv4_mask': None, 'ipv6': None, 'ipv6_mask': None, }, { 'name': 'TEST_PCIE_PORT', 'storage_id': '12345', 'connection_status': 'disconnected', 'health_status': 'unknown', 'location': 'Location4', 'logical_type': 'other', 'max_speed': '8000', 'native_port_id': '33333', 'native_parent_id': '1090', 'wwn': None, 'type': 'other', 'speed': '5000', 'mac_address': None, 'ipv4': None, 'ipv4_mask': None, 'ipv6': None, 'ipv6_mask': None, }, { 'name': 'TEST_BOND_PORT', 'storage_id': '12345', 'connection_status': 'connected', 'health_status': 'unknown', 'location': 'Location5', 'logical_type': 'other', 'max_speed': None, 'native_port_id': '44444', 'native_parent_id': None, 'wwn': None, 'type': 'other', 'speed': None, 'mac_address': None, 'ipv4': None, 'ipv4_mask': None, 'ipv6': None, 'ipv6_mask': None, }, { 'name': 'TEST_SAS_PORT', 'storage_id': '12345', 'connection_status': 'unknown', 'health_status': 'unknown', 'location': 'Location6', 'logical_type': 'other', 'max_speed': '12000', 'native_port_id': '55555', 'native_parent_id': '0A', 'wwn': None, 'type': 'sas', 'speed': '12000', 'mac_address': None, 'ipv4': None, 'ipv4_mask': None, 'ipv6': None, 'ipv6_mask': None, } ] ret = [ { 'data': [ { 'TYPE': '212', 'NAME': 'TEST_FC_PORT', 'RUNNINGSTATUS': '11', 'HEALTHSTATUS': '1', 'ID': '012345', 'LOCATION': 'Location1', 'MAXSPEED': '16000', 'MAXSUPPORTSPEED': '16000', 'LOGICTYPE': '0', 'RUNSPEED': '-1', 'PARENTID': '0B.0', 'WWN': 'WWN_123000', }, ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'TYPE': '252', 'NAME': 'TEST_FCOE_PORT', 'RUNNINGSTATUS': '11', 'HEALTHSTATUS': '1', 'ID': '22222', 'LOCATION': 'Location2', 'MAXSPEED': '12000', 'LOGICTYPE': '0', 'RUNSPEED': '-1', 'PARENTID': '0B.2', 'WWN': '2210', }], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'TYPE': '213', 'NAME': 'TEST_ETH_PORT', 'RUNNINGSTATUS': '11', 'HEALTHSTATUS': '1', 'ID': '11111', 'LOCATION': 'Location3', 'SPEED': '-1', 'maxSpeed': '1000', 'LOGICTYPE': '0', 'RUNSPEED': '-1', 'PARENTID': '0B.0', 'MACADDRESS': 'MAC_1:ff:00', 'IP4ADDR': '', 'IP4MASK': '', 'IP6ADDR': '', 'IP6MASK': '', }], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'TYPE': '233', 'NAME': 'TEST_PCIE_PORT', 'RUNNINGSTATUS': '11', 'HEALTHSTATUS': '1', 'ID': '33333', 'LOCATION': 'Location4', 'PCIESPEED': '5000', 'MAXSPEED': '8000', 'PARENTID': '1090', }], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'TYPE': '235', 'NAME': 'TEST_BOND_PORT', 'RUNNINGSTATUS': '10', 'HEALTHSTATUS': '1', 'ID': '44444', 'LOCATION': 'Location5', }], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'TYPE': '214', 'NAME': 'TEST_SAS_PORT', 'RUNNINGSTATUS': '0', 'HEALTHSTATUS': '0', 'ID': '55555', 'LOCATION': 'Location6', 'RUNSPEED': '12000', 'MAXSPEED': '12000', 'PARENTID': '0A', }], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'TYPE': '210', 'ID': '012345', 'NAME': 'Name100', 'RUNNINGSTATUS': '27', 'HEALTHSTATUS': '0', }], 'error': { 'code': 0, 'description': '0' } } ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): ports = driver.list_ports(context) self.assertDictEqual(ports[0], expected[0]) self.assertDictEqual(ports[1], expected[1]) self.assertDictEqual(ports[2], expected[2]) self.assertDictEqual(ports[3], expected[3]) self.assertDictEqual(ports[4], expected[4]) self.assertDictEqual(ports[5], expected[5]) with mock.patch.object(RestClient, 'get_all_ports', side_effect=exception.DelfinException): with 
self.assertRaises(Exception) as exc: driver.list_ports(context) self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_all_ports', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.list_ports(context) self.assertIn('', str(exc.exception)) def test_list_controllers(self): driver = create_driver() expected = [ { 'name': 'Controller-1', 'storage_id': '12345', 'native_controller_id': '0A', 'status': 'normal', 'location': 'Location1', 'soft_version': 'Ver123', 'cpu_info': 'Intel Xenon', 'memory_size': '100000', }, { 'name': 'Controller-2', 'storage_id': '12345', 'native_controller_id': '0B', 'status': 'offline', 'location': 'Location2', 'soft_version': 'VerABC', 'cpu_info': 'ARM64', 'memory_size': '500000', }, { 'name': 'Controller-3', 'storage_id': '12345', 'native_controller_id': '0B', 'status': 'unknown', 'location': 'Location3', 'soft_version': 'VerABC', 'cpu_info': 'ARM64', 'memory_size': '500000', } ] ret = [ { 'data': [ { 'RUNNINGSTATUS': '27', 'NAME': 'Controller-1', 'SOFTVER': 'Ver123', 'CPUINFO': 'Intel Xenon', 'MEMORYSIZE': '100000', 'ID': '0A', 'LOCATION': 'Location1' }, { 'RUNNINGSTATUS': '28', 'NAME': 'Controller-2', 'SOFTVER': 'VerABC', 'CPUINFO': 'ARM64', 'MEMORYSIZE': '500000', 'ID': '0B', 'LOCATION': 'Location2' }, { 'RUNNINGSTATUS': '0', 'NAME': 'Controller-3', 'SOFTVER': 'VerABC', 'CPUINFO': 'ARM64', 'MEMORYSIZE': '500000', 'ID': '0B', 'LOCATION': 'Location3' }, ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'SOFTVER': '1000', }], 'error': { 'code': 0, 'description': '0' } } ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): controller = driver.list_controllers(context) self.assertDictEqual(controller[0], expected[0]) self.assertDictEqual(controller[1], expected[1]) self.assertDictEqual(controller[2], expected[2]) with mock.patch.object(RestClient, 'get_all_controllers', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_controllers(context) self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_all_controllers', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.list_controllers(context) self.assertIn('', str(exc.exception)) def test_list_disks(self): driver = create_driver() expected = [ { 'name': 'ST200:1234', 'storage_id': '12345', 'native_disk_id': '0A', 'serial_number': '1234', 'manufacturer': 'Segate', 'model': 'ST200', 'firmware': '0003', 'speed': 7200, 'capacity': 1000000, 'status': 'normal', 'physical_type': 'unknown', 'logical_type': 'free', 'health_score': '255', 'native_disk_group_id': None, 'location': 'Location1', }, { 'name': 'WD00:1111', 'storage_id': '12345', 'native_disk_id': '0B', 'serial_number': '1111', 'manufacturer': 'WesterDigital', 'model': 'WD00', 'firmware': '123', 'speed': 10000, 'capacity': 5000000, 'status': 'offline', 'physical_type': 'ssd', 'logical_type': 'free', 'health_score': '255', 'native_disk_group_id': None, 'location': 'Location2', }, { 'name': 'ST200:1234', 'storage_id': '12345', 'native_disk_id': '0A', 'serial_number': '1234', 'manufacturer': 'Segate', 'model': 'ST200', 'firmware': '0003', 'speed': 7200, 'capacity': 1000000, 'status': 'abnormal', 'physical_type': 'unknown', 'logical_type': 'free', 'health_score': '255', 'native_disk_group_id': None, 'location': 'Location1', } ] ret = [ { 'data': [ { 'RUNNINGSTATUS': '27', 'DISKTYPE': '4', 'LOGICTYPE': '1', 'HEALTHMARK': '255', 'MODEL': 'ST200', 'SERIALNUMBER': 
'1234', 'MANUFACTURER': 'Segate', 'FIRMWAREVER': '0003', 'SPEEDRPM': '7200', 'SECTORS': '10000', 'SECTORSIZE': '100', 'ID': '0A', 'LOCATION': 'Location1' }, { 'RUNNINGSTATUS': '28', 'DISKTYPE': '3', 'LOGICTYPE': '1', 'HEALTHMARK': '255', 'MODEL': 'WD00', 'SERIALNUMBER': '1111', 'MANUFACTURER': 'WesterDigital', 'FIRMWAREVER': '123', 'SPEEDRPM': '10000', 'SECTORS': '50000', 'SECTORSIZE': '100', 'ID': '0B', 'LOCATION': 'Location2' }, { 'RUNNINGSTATUS': '0', 'DISKTYPE': '4', 'LOGICTYPE': '1', 'HEALTHMARK': '255', 'MODEL': 'ST200', 'SERIALNUMBER': '1234', 'MANUFACTURER': 'Segate', 'FIRMWAREVER': '0003', 'SPEEDRPM': '7200', 'SECTORS': '10000', 'SECTORSIZE': '100', 'ID': '0A', 'LOCATION': 'Location1' } ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'SOFTVER': '1000', }], 'error': { 'code': 0, 'description': '0' } } ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): disk = driver.list_disks(context) self.assertDictEqual(disk[0], expected[0]) self.assertDictEqual(disk[1], expected[1]) self.assertDictEqual(disk[2], expected[2]) with mock.patch.object(RestClient, 'get_all_disks', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_disks(context) self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_all_disks', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.list_disks(context) self.assertIn('', str(exc.exception)) def test_list_filesystems(self): driver = create_driver() expected = [ { 'name': 'fs1', 'storage_id': '12345', 'native_filesystem_id': '123', 'native_pool_id': '123', 'compressed': True, 'deduplicated': True, 'worm': 'non_worm', 'status': 'normal', 'type': 'thin', 'total_capacity': 81920, 'used_capacity': 8192, 'free_capacity': 8192, }, { 'name': 'fs2', 'storage_id': '12345', 'native_filesystem_id': '123', 'native_pool_id': '123', 'compressed': False, 'deduplicated': False, 'worm': 'compliance', 'status': 'normal', 'type': 'thin', 'total_capacity': 81920, 'used_capacity': 81920, 'free_capacity': 8192, }, { 'name': 'fs3', 'storage_id': '12345', 'native_filesystem_id': '123', 'native_pool_id': '123', 'compressed': True, 'deduplicated': True, 'worm': 'audit_log', 'status': 'normal', 'type': 'thin', 'total_capacity': 81920, 'used_capacity': 8192, 'free_capacity': 8192, } ] ret = [ { 'data': [ { 'HEALTHSTATUS': '1', 'ALLOCTYPE': '1', 'SECTORSIZE': '8192', 'CAPACITY': '10', 'ALLOCCAPACITY': '1', 'AVAILABLECAPCITY': '1', 'ENABLECOMPRESSION': 'true', 'ENABLEDEDUP': 'true', 'NAME': 'fs1', 'ID': '123', 'PARENTTYPE': 216, 'PARENTID': '123', 'WORMTYPE': '0' }, { 'HEALTHSTATUS': '1', 'ALLOCTYPE': '1', 'SECTORSIZE': '8192', 'CAPACITY': '10', 'ALLOCCAPACITY': '10', 'AVAILABLECAPCITY': '1', 'ENABLECOMPRESSION': 'false', 'ENABLEDEDUP': 'false', 'NAME': 'fs2', 'ID': '123', 'PARENTTYPE': 216, 'PARENTID': '123', 'WORMTYPE': '1' }, { 'HEALTHSTATUS': '1', 'ALLOCTYPE': '1', 'SECTORSIZE': '8192', 'CAPACITY': '10', 'ALLOCCAPACITY': '1', 'AVAILABLECAPCITY': '1', 'ENABLECOMPRESSION': 'true', 'ENABLEDEDUP': 'true', 'NAME': 'fs3', 'ID': '123', 'PARENTTYPE': 216, 'PARENTID': '123', 'WORMTYPE': '2' } ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'SOFTVER': '1000', }], 'error': { 'code': 0, 'description': '0' } } ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): fs = driver.list_filesystems(context) self.assertDictEqual(fs[0], expected[0]) self.assertDictEqual(fs[1], expected[1]) self.assertDictEqual(fs[2], expected[2]) with 
mock.patch.object(RestClient, 'get_all_filesystems', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_filesystems(context) self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_all_filesystems', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.list_filesystems(context) self.assertIn('', str(exc.exception)) def test_list_qtrees(self): driver = create_driver() expected = [ { 'name': 'qtree1', 'storage_id': '12345', 'native_qtree_id': '123', 'native_filesystem_id': '123', 'security_mode': 'mixed', }, { 'name': 'WD00:1111', 'storage_id': '12345', 'native_disk_id': '0B', 'serial_number': '1111', 'manufacturer': 'WesterDigital', 'model': 'WD00', 'firmware': '123', 'speed': 10000, 'capacity': 5000000, 'status': 'offline', 'physical_type': 'ssd', 'logical_type': 'free', 'health_score': '255', 'native_disk_group_id': None, 'location': 'Location2', }, { 'name': 'ST200:1234', 'storage_id': '12345', 'native_disk_id': '0A', 'serial_number': '1234', 'manufacturer': 'Segate', 'model': 'ST200', 'firmware': '0003', 'speed': 7200, 'capacity': 1000000, 'status': 'abnormal', 'physical_type': 'unknown', 'logical_type': 'free', 'health_score': '255', 'native_disk_group_id': None, 'location': 'Location1', } ] ret = [ { 'data': [ { 'NAME': 'qtree1', 'ID': '123', 'securityStyle': '0', 'PARENTTYPE': 40, 'PARENTID': '123', }, ], 'error': { 'code': 0, 'description': '0' } } ] with mock.patch.object(RestClient, 'get_all_filesystems', side_effect=[[{"ID": "1"}]]): with mock.patch.object(RestClient, 'do_call', side_effect=ret): qtree = driver.list_qtrees(context) self.assertDictEqual(qtree[0], expected[0]) with mock.patch.object(RestClient, 'get_all_filesystems', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_qtrees(context) self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_all_filesystems', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.list_qtrees(context) self.assertIn('', str(exc.exception)) def test_list_shares(self): driver = create_driver() expected = [ { 'name': 'CIFS', 'storage_id': '12345', 'native_share_id': '111', 'native_filesystem_id': 'FS111', 'path': '/filesystem0001/', 'protocol': 'cifs' }, { 'name': 'NFS', 'storage_id': '12345', 'native_share_id': '222', 'native_filesystem_id': 'FS222', 'path': '/filesystem0002/', 'protocol': 'nfs' }, { 'name': 'FTP', 'storage_id': '12345', 'native_share_id': '333', 'native_filesystem_id': 'FS333', 'path': '/filesystem0003/', 'protocol': 'ftp' } ] ret = [ { 'data': [ { 'subType': '0', 'NAME': 'CIFS', 'SHAREPATH': '/filesystem0001/', 'ID': '111', 'FSID': 'FS111' }, ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'type': '16401', 'NAME': 'NFS', 'SHAREPATH': '/filesystem0002/', 'ID': '222', 'FSID': 'FS222' }], 'error': { 'code': 0, 'description': '0' } }, { 'data': [{ 'ACCESSNAME': 'test', 'NAME': 'FTP', 'SHAREPATH': '/filesystem0003/', 'ID': '333', 'FSID': 'FS333' }], 'error': { 'code': 0, 'description': '0' } } ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): share = driver.list_shares(context) self.assertDictEqual(share[0], expected[0]) self.assertDictEqual(share[1], expected[1]) self.assertDictEqual(share[2], expected[2]) with mock.patch.object(RestClient, 'get_all_shares', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_shares(context) 
self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_all_shares', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.list_shares(context) self.assertIn('', str(exc.exception)) def test_list_storage_host_initiators(self): driver = create_driver() expected = [ { 'name': '12', 'description': 'FC Initiator', 'alias': '1212121212121212', 'storage_id': '12345', 'native_storage_host_initiator_id': '1212121212121212', 'wwn': '1212121212121212', 'status': 'online', 'native_storage_host_id': '0' } ] ret = [ { 'data': [ { "HEALTHSTATUS": "1", "ID": "1212121212121212", "ISFREE": "true", "MULTIPATHTYPE": "1", "NAME": "12", "OPERATIONSYSTEM": "1", "PARENTID": "0", "PARENTTYPE": 0, "PARENTNAME": "Host001", "RUNNINGSTATUS": "27", "TYPE": 223, "FAILOVERMODE": "3", "SPECIALMODETYPE": "2", "PATHTYPE": "1" } ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [ { "HEALTHSTATUS": "1", "ID": "111111111111111111", "ISFREE": "false", "MULTIPATHTYPE": "1", "OPERATIONSYSTEM": "255", "PARENTID": "0", "PARENTNAME": "Host001", "PARENTTYPE": 21, "RUNNINGSTATUS": "28", "TYPE": 222, "USECHAP": "false", "FAILOVERMODE": "3", "SPECIALMODETYPE": "2", "PATHTYPE": "1" } ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [ { "HEALTHSTATUS": "1", "ID": "1111111111111119", "ISFREE": "true", "MULTIPATHTYPE": "1", "NAME": "", "OPERATIONSYSTEM": "1", "RUNNINGSTATUS": "28", "TYPE": 16499, "FAILOVERMODE": "3", "SPECIALMODETYPE": "2", "PATHTYPE": "1" } ], 'error': { 'code': 0, 'description': '0' } }, ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): initators = driver.list_storage_host_initiators(context) self.assertDictEqual(initators[0], expected[0]) with mock.patch.object(RestClient, 'get_all_initiators', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_storage_host_initiators(context) self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_all_initiators', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.list_storage_host_initiators(context) self.assertIn('', str(exc.exception)) def test_list_storage_hosts(self): driver = create_driver() expected = [ { 'name': 'Host001', 'description': '', 'storage_id': '12345', 'native_storage_host_id': '0', 'os_type': 'Linux', 'status': 'normal', 'ip_address': '' } ] ret = [ { 'data': [ { "DESCRIPTION": "", "HEALTHSTATUS": "1", "ID": "0", "INITIATORNUM": "0", "IP": "", "ISADD2HOSTGROUP": "true", "LOCATION": "", "MODEL": "", "NAME": "Host001", "NETWORKNAME": "", "OPERATIONSYSTEM": "0", "RUNNINGSTATUS": "1", "TYPE": 21, "vstoreId": "4", "vstoreName": "vStore004" } ], 'error': { 'code': 0, 'description': '0' } } ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): hosts = driver.list_storage_hosts(context) self.assertDictEqual(hosts[0], expected[0]) with mock.patch.object(RestClient, 'get_all_hosts', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_storage_hosts(context) self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_all_hosts', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.list_storage_hosts(context) self.assertIn('', str(exc.exception)) def test_list_storage_host_groups(self): driver = create_driver() expected = [ { 'name': 'hostgroup1', 'description': '', 'storage_id': '12345', 'native_storage_host_group_id': '0', 'storage_hosts': 
'123' } ] ret = [ { 'data': [ { "DESCRIPTION": "", "ID": "0", "ISADD2MAPPINGVIEW": "false", "NAME": "hostgroup1", "TYPE": 14, "vstoreId": "4", "vstoreName": "vStore004" }, ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [ { "ID": "123", }, ], 'error': { 'code': 0, 'description': '0' } } ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): hg = driver.list_storage_host_groups(context) self.assertDictEqual(hg[0], expected[0]) with mock.patch.object(RestClient, 'get_all_host_groups', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_storage_host_groups(context) self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_all_host_groups', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.list_storage_host_groups(context) self.assertIn('', str(exc.exception)) def test_list_port_groups(self): driver = create_driver() expected = [ { 'name': 'PortGroup001', 'description': '', 'storage_id': '12345', 'native_port_group_id': '0', 'ports': '123,124,125', } ] ret = [ { 'data': [ { "DESCRIPTION": "", "ID": "0", "NAME": "PortGroup001", "TYPE": 257 } ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [ { "ID": "123", } ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [ { "ID": "124", } ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [ { "ID": "125", } ], 'error': { 'code': 0, 'description': '0' } }, ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): port_groups = driver.list_port_groups(context) self.assertDictEqual(port_groups[0], expected[0]) with mock.patch.object(RestClient, 'get_all_port_groups', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_port_groups(context) self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_all_port_groups', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.list_port_groups(context) self.assertIn('', str(exc.exception)) def test_list_volume_groups(self): driver = create_driver() expected = [ { 'name': 'LUNGroup001', 'description': '', 'storage_id': '12345', 'native_volume_group_id': '0', 'volumes': '123' } ] ret = [ { 'data': [ { "APPTYPE": "0", "CAPCITY": "2097152", "CONFIGDATA": "", "DESCRIPTION": "", "GROUPTYPE": "0", "ID": "0", "ISADD2MAPPINGVIEW": "false", "NAME": "LUNGroup001", "TYPE": 256, "vstoreId": "4", "vstoreName": "vStore004" } ], 'error': { 'code': 0, 'description': '0' } }, { 'data': [ { "ID": "123", } ], 'error': { 'code': 0, 'description': '0' } }, ] with mock.patch.object(RestClient, 'do_call', side_effect=ret): volume_groups = driver.list_volume_groups(context) self.assertDictEqual(volume_groups[0], expected[0]) with mock.patch.object(RestClient, 'get_all_volume_groups', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_volume_groups(context) self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_all_volume_groups', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.list_volume_groups(context) self.assertIn('', str(exc.exception)) @mock.patch.object(RestClient, 'get_all_associate_mapping_views') @mock.patch.object(RestClient, 'get_all_port_groups') @mock.patch.object(RestClient, 'get_all_volume_groups') @mock.patch.object(RestClient, 'get_all_host_groups') def test_list_masking_views(self, mock_hg, mock_vg, mock_pg, mock_associate): driver = 
create_driver() expected = [ { 'name': 'MappingView001', 'description': '', 'storage_id': '12345', 'native_masking_view_id': '1', } ] ret = [ { 'data': [ { "DESCRIPTION": "", "ENABLEINBANDCOMMAND": "true", "ID": "1", "INBANDLUNWWN": "", "NAME": "MappingView001", "TYPE": 245, "vstoreId": "4", "vstoreName": "vStore004" } ], 'error': { 'code': 0, 'description': '0' } } ] mock_hg.return_value = [] mock_vg.return_value = [] mock_pg.return_value = [] mock_associate.return_value = [] with mock.patch.object(RestClient, 'do_call', side_effect=ret): view = driver.list_masking_views(context) self.assertDictEqual(view[0], expected[0]) with mock.patch.object(RestClient, 'get_all_mapping_views', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.list_masking_views(context) self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_all_mapping_views', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.list_masking_views(context) self.assertIn('', str(exc.exception)) @mock.patch.object(RestClient, 'get_disk_metrics') @mock.patch.object(RestClient, 'get_port_metrics') @mock.patch.object(RestClient, 'get_controller_metrics') @mock.patch.object(RestClient, 'get_volume_metrics') @mock.patch.object(RestClient, 'get_pool_metrics') @mock.patch.object(RestClient, 'enable_metrics_collection') @mock.patch.object(RestClient, 'disable_metrics_collection') def test_collect_perf_metrics(self, mock_di, mock_en, mock_pool, mock_volume, mock_controller, mock_port, mock_disk): driver = create_driver() ret = [ { 'data': [{}], 'error': { 'code': 0, 'description': '0' } } ] mock_di.return_value = None mock_en.return_value = None mock_pool.return_value = [{}] mock_volume.return_value = [{}] mock_controller.return_value = [{}] mock_port.return_value = [{}] mock_disk.return_value = [{}] with mock.patch.object(RestClient, 'do_call', side_effect=ret): storage_id = 123 resource_metrics = { 'storagePool': {'iops': 'iops description'}, 'volume': {'iops': 'iops description'}, 'port': {'iops': 'iops description'}, 'disk': {'iops': 'iops description'}, } start, end = 0, 1 driver.collect_perf_metrics( context, storage_id, resource_metrics, start, end) mock_en.assert_called() mock_di.assert_called() mock_pool.assert_called() mock_volume.assert_called() mock_controller.assert_not_called() mock_port.assert_called() mock_disk.assert_called() with mock.patch.object(RestClient, 'get_disk_metrics', side_effect=exception.DelfinException): with self.assertRaises(Exception) as exc: driver.collect_perf_metrics(context, 0, {'disk': {'iops': 'iops'}}, 0, 0) self.assertIn('An unknown exception occurred', str(exc.exception)) with mock.patch.object(RestClient, 'get_disk_metrics', side_effect=TypeError): with self.assertRaises(Exception) as exc: driver.collect_perf_metrics(context, 0, {'disk': {'iops': 'iops'}}, 0, 0) self.assertIn('', str(exc.exception)) def test_get_capabilities(self): driver = create_driver() cap = driver.get_capabilities(context) self.assertIsNotNone(cap.get('resource_metrics')) self.assertIsNotNone(cap.get('resource_metrics').get('storagePool')) self.assertIsNotNone(cap.get('resource_metrics').get('volume')) self.assertIsNotNone(cap.get('resource_metrics').get('controller')) self.assertIsNotNone(cap.get('resource_metrics').get('port')) self.assertIsNotNone(cap.get('resource_metrics').get('disk')) ================================================ FILE: delfin/tests/unit/drivers/huawei/oceanstor/test_rest_client.py 
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import TestCase, mock
from unittest.mock import call

from requests.sessions import Session

from delfin import exception
from delfin.common import config  # noqa
from delfin.drivers.huawei.oceanstor.rest_client import RestClient

ACCESS_INFO = {
    "storage_id": "12345",
    "vendor": "huawei",
    "model": "oceanstor",
    "rest": {
        "host": "10.0.0.1",
        "port": 1234,
        "username": "user",
        "password": "cGFzc3dvcmQ="
    },
    "extra_attributes": {
        "array_id": "00112233"
    }
}

RESP = {
    "error": {
        "code": 0
    },
    "data": {
        "data": "dummy",
        "deviceid": "0123456",
        "iBaseToken": "112233",
        "accountstate": "GREEN"
    }
}


class TestOceanStorRestClient(TestCase):
    def _mock_response(self, status=200, content="CONTENT",
                       json_data=None, raise_for_status=None):
        mock_resp = mock.Mock()
        mock_resp.raise_for_status = mock.Mock()
        if raise_for_status:
            mock_resp.raise_for_status.side_effect = raise_for_status
        mock_resp.status_code = status
        mock_resp.content = content
        if json_data:
            mock_resp.json = mock.Mock(return_value=json_data)
        return mock_resp

    # @mock.patch.object(RestClient, 'login')
    @mock.patch.object(Session, 'post')
    def test_init(self, mock_rest):
        mock_resp = self._mock_response(json_data=RESP)
        mock_rest.return_value = mock_resp
        kwargs = ACCESS_INFO
        rest_client = RestClient(**kwargs)
        self.assertEqual(rest_client.rest_host, "10.0.0.1")
        self.assertEqual(rest_client.rest_port, 1234)
        self.assertEqual(rest_client.session.headers['iBaseToken'],
                         '112233')

    @mock.patch.object(RestClient, 'login')
    def test_reset_connection(self, mock_login):
        mock_login.return_value = None
        kwargs = ACCESS_INFO
        rest_client = RestClient(**kwargs)
        self.assertEqual(rest_client.rest_host, "10.0.0.1")
        self.assertEqual(rest_client.rest_port, 1234)

        mock_login.side_effect = exception.StorageBackendException
        with self.assertRaises(Exception) as exc:
            RestClient(**kwargs)
        self.assertIn('The credentials are invalid', str(exc.exception))

    @mock.patch.object(RestClient, 'call')
    @mock.patch.object(RestClient, 'login')
    def test_get_storage(self, mock_login, mock_call):
        mock_login.return_value = None
        mock_call.return_value = RESP
        kwargs = ACCESS_INFO
        rest_client = RestClient(**kwargs)
        data = rest_client.get_storage()
        self.assertEqual(data['data'], 'dummy')

        mock_call.return_value = {
            "error": {
                "code": 0
            }
        }
        with self.assertRaises(Exception) as exc:
            rest_client.get_storage()
        self.assertIn('Exception from Storage Backend', str(exc.exception))

        mock_call.return_value['error']['code'] = 1
        with self.assertRaises(Exception) as exc:
            rest_client.get_storage()
        self.assertIn('Exception from Storage Backend', str(exc.exception))
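    # get_storage (above) surfaces backend failures in two ways: a response
    # with a non-zero error code raises, and so does a code-0 response that
    # lacks the 'data' payload; both produce 'Exception from Storage
    # Backend'. A rough sketch of that validation, assuming a hypothetical
    # helper name (the client's real method may differ):
    #
    #     def _check_result(result):
    #         if result.get('error', {}).get('code') != 0 or \
    #                 'data' not in result:
    #             raise exception.StorageBackendException(...)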
mock_call.assert_called_with("/controller", log_filter_flag=True, method='GET') @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_all_pools(self, mock_login, mock_call): mock_login.return_value = None mock_call.return_value = RESP kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) data = rest_client.get_all_pools() self.assertEqual(data['data']['data'], 'dummy') mock_call.assert_called_with("/storagepool", None, 'GET', log_filter_flag=True) @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_all_hosts(self, mock_login, mock_call): mock_login.return_value = None mock_call.return_value = RESP kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) data = rest_client.get_all_hosts() self.assertEqual(data['data']['data'], 'dummy') mock_call.assert_called_with("/host", None, 'GET', log_filter_flag=True) @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_all_host_groups(self, mock_login, mock_call): mock_login.return_value = None mock_call.return_value = RESP kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) data = rest_client.get_all_host_groups() self.assertEqual(data['data']['data'], 'dummy') mock_call.assert_called_with("/hostgroup", None, 'GET', log_filter_flag=True) @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_all_port_groups(self, mock_login, mock_call): mock_login.return_value = None mock_call.return_value = RESP kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) data = rest_client.get_all_port_groups() self.assertEqual(data['data']['data'], 'dummy') mock_call.assert_called_with("/portgroup", None, 'GET', log_filter_flag=True) @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_all_volume_groups(self, mock_login, mock_call): mock_login.return_value = None mock_call.return_value = RESP kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) data = rest_client.get_all_volume_groups() self.assertEqual(data['data']['data'], 'dummy') mock_call.assert_called_with("/lungroup", None, 'GET', log_filter_flag=True) @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_all_volumes(self, mock_login, mock_call): mock_login.return_value = None mock_call.return_value = RESP kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) data = rest_client.get_all_volumes() self.assertEqual(data['data']['data'], 'dummy') mock_call.assert_called_with("/lun", None, 'GET', log_filter_flag=True) @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_all_initiators(self, mock_login, mock_call): mock_login.return_value = None mock_call.side_effects = ["", "", ""] kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) rest_client.get_all_initiators() call1 = call("/fc_initiator", None, 'GET', log_filter_flag=True) call2 = call("/iscsi_initiator", None, 'GET', log_filter_flag=True) call3 = call("/ib_initiator", None, 'GET', log_filter_flag=True) calls = [call1, call2, call3] mock_call.assert_has_calls(calls) @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_all_mapping_views(self, mock_login, mock_call): mock_login.return_value = None mock_call.return_value = RESP kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) data = rest_client.get_all_mapping_views() self.assertEqual(data['data']['data'], 
'dummy') mock_call.assert_called_with("/mappingview", None, 'GET', log_filter_flag=True) @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_volumes(self, mock_login, mock_call): mock_login.return_value = None mock_call.return_value = RESP kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) data = rest_client.get_all_volumes() self.assertEqual(data['data']['data'], 'dummy') mock_call.assert_called_with("/lun", None, 'GET', log_filter_flag=True) @mock.patch.object(RestClient, 'call') @mock.patch.object(RestClient, 'login') def test_enable_metrics_collection(self, mock_login, mock_call): mock_login.return_value = None mock_call.return_value = RESP kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) data = rest_client.enable_metrics_collection() self.assertEqual(data['data'], 'dummy') mock_call.assert_called_with("/performance_statistic_switch", {'CMO_PERFORMANCE_SWITCH': '1'}, log_filter_flag=True, method='PUT') @mock.patch.object(RestClient, 'call') @mock.patch.object(RestClient, 'login') def test_disable_metrics_collection(self, mock_login, mock_call): mock_login.return_value = None mock_call.return_value = RESP kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) data = rest_client.disable_metrics_collection() self.assertEqual(data['data'], 'dummy') mock_call.assert_called_with("/performance_statistic_switch", {'CMO_PERFORMANCE_SWITCH': '0'}, log_filter_flag=True, method='PUT') @mock.patch.object(RestClient, 'disable_metrics_collection') @mock.patch.object(RestClient, 'enable_metrics_collection') @mock.patch.object(RestClient, 'call') @mock.patch.object(RestClient, 'login') def test_configure_metrics_collection(self, mock_login, mock_call, mock_en, mock_di): mock_login.return_value = None mock_call.return_value = RESP mock_en.return_value = None mock_di.return_value = None kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) rest_client.configure_metrics_collection() data = { "CMO_STATISTIC_ARCHIVE_SWITCH": 1, "CMO_STATISTIC_ARCHIVE_TIME": 300, "CMO_STATISTIC_AUTO_STOP": 0, "CMO_STATISTIC_INTERVAL": 60, "CMO_STATISTIC_MAX_TIME": 0 } mock_call.assert_called_with("/performance_statistic_strategy", data, log_filter_flag=True, method='PUT') @mock.patch.object(RestClient, 'get_all_pools') @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_pool_metrics(self, mock_login, mock_call, mock_pools): mock_login.return_value = None mock_call.return_value = [{'CMO_STATISTIC_DATA_LIST': '12,25', 'CMO_STATISTIC_TIMESTAMP': 0}] mock_pools.return_value = [ {'ID': '123', 'TYPE': '100', 'NAME': 'pool'} ] kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) metrics = rest_client.get_pool_metrics('', {'iops': {'unit': 'IOPS'}}) mock_call.assert_called_with( "/performace_statistic/cur_statistic_data", None, 'GET', log_filter_flag=True, params='CMO_STATISTIC_UUID=100:123&CMO_STATISTIC_DATA_ID_LIST=22&' 'timeConversion=0&' ) expected_label = { 'storage_id': '', 'resource_type': 'pool', 'resource_id': '123', 'type': 'RAW', 'unit': 'IOPS', 'resource_name': 'pool' } self.assertEqual(metrics[0].name, 'iops') self.assertDictEqual(metrics[0].labels, expected_label) self.assertListEqual(list(metrics[0].values.values()), [12]) @mock.patch.object(RestClient, 'get_all_volumes') @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_volume_metrics(self, mock_login, mock_call, mock_volumes): mock_login.return_value = None mock_call.return_value = 
[{'CMO_STATISTIC_DATA_LIST': '12,25', 'CMO_STATISTIC_TIMESTAMP': 0}] mock_volumes.return_value = [ {'ID': '123', 'TYPE': '100', 'NAME': 'volume'} ] kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) metrics = rest_client.get_volume_metrics( '', {'iops': {'unit': 'IOPS'}}) mock_call.assert_called_with( "/performace_statistic/cur_statistic_data", None, 'GET', log_filter_flag=True, params='CMO_STATISTIC_UUID=100:123&CMO_STATISTIC_DATA_ID_LIST=22&' 'timeConversion=0&' ) expected_label = { 'storage_id': '', 'resource_type': 'volume', 'resource_id': '123', 'type': 'RAW', 'unit': 'IOPS', 'resource_name': 'volume' } self.assertEqual(metrics[0].name, 'iops') self.assertDictEqual(metrics[0].labels, expected_label) self.assertListEqual(list(metrics[0].values.values()), [12]) @mock.patch.object(RestClient, 'get_all_controllers') @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_controller_metrics(self, mock_login, mock_call, mock_controllers): mock_login.return_value = None mock_call.return_value = [{'CMO_STATISTIC_DATA_LIST': '12,25', 'CMO_STATISTIC_TIMESTAMP': 0}] mock_controllers.return_value = [ {'ID': '123', 'TYPE': '100', 'NAME': 'controller'} ] kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) metrics = rest_client.get_controller_metrics( '', {'iops': {'unit': 'IOPS'}}) mock_call.assert_called_with( "/performace_statistic/cur_statistic_data", None, 'GET', log_filter_flag=True, params='CMO_STATISTIC_UUID=100:123&CMO_STATISTIC_DATA_ID_LIST=22&' 'timeConversion=0&' ) expected_label = { 'storage_id': '', 'resource_type': 'controller', 'resource_id': '123', 'type': 'RAW', 'unit': 'IOPS', 'resource_name': 'controller' } self.assertEqual(metrics[0].name, 'iops') self.assertDictEqual(metrics[0].labels, expected_label) self.assertListEqual(list(metrics[0].values.values()), [12]) @mock.patch.object(RestClient, 'get_all_ports') @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_port_metrics(self, mock_login, mock_call, mock_ports): mock_login.return_value = None mock_call.return_value = [{'CMO_STATISTIC_DATA_LIST': '12,25', 'CMO_STATISTIC_TIMESTAMP': 0}] mock_ports.return_value = [ {'ID': '123', 'TYPE': '100', 'NAME': 'port'} ] kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) metrics = rest_client.get_port_metrics('', {'iops': {'unit': 'IOPS'}}) mock_call.assert_called_with( "/performace_statistic/cur_statistic_data", None, 'GET', log_filter_flag=True, params='CMO_STATISTIC_UUID=100:123&CMO_STATISTIC_DATA_ID_LIST=22&' 'timeConversion=0&' ) expected_label = { 'storage_id': '', 'resource_type': 'port', 'resource_id': '123', 'type': 'RAW', 'unit': 'IOPS', 'resource_name': 'port' } self.assertEqual(metrics[0].name, 'iops') self.assertDictEqual(metrics[0].labels, expected_label) self.assertListEqual(list(metrics[0].values.values()), [12]) @mock.patch.object(RestClient, 'get_all_disks') @mock.patch.object(RestClient, 'paginated_call') @mock.patch.object(RestClient, 'login') def test_get_disk_metrics(self, mock_login, mock_call, mock_disks): mock_login.return_value = None mock_call.return_value = [{'CMO_STATISTIC_DATA_LIST': '12,25', 'CMO_STATISTIC_TIMESTAMP': 0}] mock_disks.return_value = [ {'ID': '123', 'TYPE': '100', 'MODEL': 'disk', 'SERIALNUMBER': '0'} ] kwargs = ACCESS_INFO rest_client = RestClient(**kwargs) metrics = rest_client.get_disk_metrics('', {'iops': {'unit': 'IOPS'}}) mock_call.assert_called_with( "/performace_statistic/cur_statistic_data", None, 'GET', log_filter_flag=True, 
params='CMO_STATISTIC_UUID=100:123&CMO_STATISTIC_DATA_ID_LIST=22&' 'timeConversion=0&' ) expected_label = { 'storage_id': '', 'resource_type': 'disk', 'resource_id': '123', 'resource_name': 'disk:0', 'type': 'RAW', 'unit': 'IOPS', } self.assertEqual(metrics[0].name, 'iops') self.assertDictEqual(metrics[0].labels, expected_label) self.assertListEqual(list(metrics[0].values.values()), [12]) ================================================ FILE: delfin/tests/unit/drivers/ibm/__init__.py ================================================ ================================================ FILE: delfin/tests/unit/drivers/ibm/ibm_ds8k/__init__.py ================================================ ================================================ FILE: delfin/tests/unit/drivers/ibm/ibm_ds8k/test_ibm_ds8k.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from unittest import TestCase, mock sys.modules['delfin.cryptor'] = mock.Mock() from delfin import context from delfin.drivers.ibm.ds8k.rest_handler import RestHandler from delfin.drivers.ibm.ds8k.ds8k import DS8KDriver ACCESS_INFO = { "storage_id": "12345", "rest": { "host": "110.143.132.231", "port": "8443", "username": "username", "password": "cGFzc3dvcmQ=" }, "ssh": { "host": "110.143.132.231", "port": "22", "username": "username", "password": "password", "host_key": "weqewrerwerwerwe" }, "vendor": "IBM", "model": "DS8000", "extra_attributes": { "array_id": "00112233" } } GET_STORAGE = { "data": { "systems": [ { "id": "2107-75BXG71", "name": "TDCUOB_DS8870", "state": "online", "release": "7.5.1", "bundle": "87.51.103.5120", "MTM": "2423-961", "sn": "75BXG71", "wwnn": "5005076304FFD7EF", "cap": "1655709892608", "capalloc": "1073741824000", "capavail": "581968068608", "capraw": "2516582400000" } ] } } GET_ALL_POOLS = { "data": { "pools": [ { "id": "P0", "link": { "rel": "self", "href": "https:/192.168.1.170:8452/api/v1/pools/P0" }, "name": "test_pool", "node": "0", "stgtype": "fb", "cap": "1655709892608", "capalloc": "1073741824000", "capavail": "581968068608", "overprovisioned": "0.6", "easytier": "managed", "tieralloc": [ { "tier": "ENT", "cap": "1655709892608", "allocated": "1073741824000", "assigned": "0" } ], "threshold": "15", "real_capacity_allocated_on_ese": "0", "virtual_capacity_allocated_on_ese": "0", "eserep": {}, "tserep": {}, "volumes": { "link": { "rel": "self" } } } ] } } GET_ALL_LUNS = { "data": { "volumes": [ { "link": { "rel": "self", "href": "https://{hmc}:443/api/v1/volumes/0000" }, "id": "0000", "name": "mytest", "state": "normal", "cap": "322122547200", "stgtype": "fb", "VOLSER": "", "lss": { "id": "00", "link": { "rel": "self", "href": "https://{hmc}:443/api/lss/00" } }, "allocmethod": "legacy", "tp": "none", "capalloc": "134217728", "MTM": "2107-900", "datatype": "FB 512", "tieralloc": [ { "tier": "ENT", "allocated": "34502" } ], "pool": { "id": "P2", "link": { "rel": "self", "href": "https://{hmc}:443/api/v1/pools/P2" } } } ] } } GET_ALL_LUNS_NULL = 
{ "data": { "volumes": [] } } GET_ALL_ALERTS = { "data": { "events": [ { "id": "SEfe", "type": "HostPortStateChanged", "severity": "error", "time": "2014-04-20T13:00:23-0700", "resource_id": "1152922127280127616", "formatted_parameter": ["10000090FA383E80", "Logged Off", "Logged In", "NISCSIHostPortID: ""IBM.2107-75BXG71/12"], "description": "Host port 10000090FA383E80 state logged in." } ] } } GET_ALL_PORTS = { "data": { "ioports": [ { "id": "I0000", "link": { "rel": "self", "href": "https:/192.168.1.170:8452/api/v1/ioports/I0000" }, "state": "online", "protocol": "FC-AL", "wwpn": "50050763040017EF", "type": "Fibre Channel-SW", "speed": "8 Gb/s", "loc": "U1400.1B1.RJ55380-P1-C1-T0" }, { "id": "I0005", "link": { "rel": "self", "href": "https:/192.168.1.170:8452/api/v1/ioports/I0005" }, "state": "online", "protocol": "SCSI-FCP", "wwpn": "50050763044057EF", "type": "Fibre Channel-SW", "speed": "8 Gb/s", "loc": "U1400.1B1.RJ55380-P1-C1-T5" } ] } } GET_ALL_CONTROLLERS = { 'data': { 'nodes': [ { 'id': '00', 'state': 'online' }, { 'id': '01', 'state': 'online' } ] } } TOKEN_RESULT = { "server": { "status": "ok", "code": "200", "message": "Operation done successfully." }, "token": { "token": "ddb1743a", "expired_time": "2014-08-25T03:28:15-0700", "max_idle_interval": "1800000" } } TRAP_INFO = { "1.3.6.1.2.1.1.3.0": "0", '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.1139.103.1.18.2.0', '1.3.6.1.4.1.1139.103.1.18.1.1': 'eeeeeeeee', '1.3.6.1.4.1.1139.103.1.18.1.3': 'ddddddd', '1.3.6.1.4.1.1139.103.1.18.1.4': 'this is test', '1.3.6.1.4.1.1139.103.1.18.1.5': '2020/11/20 14:10:10', '1.3.6.1.4.1.1139.103.1.18.1.2': 'test' } storage_result = { 'name': 'TDCUOB_DS8870', 'vendor': 'IBM', 'model': '2423-961', 'status': 'normal', 'serial_number': '75BXG71', 'firmware_version': '7.5.1', 'location': '', 'total_capacity': 1655709892608, 'raw_capacity': 2516582400000, 'used_capacity': 1073741824000, 'free_capacity': 581968068608 } pool_result = [ { 'name': 'test_pool_0', 'storage_id': '12345', 'native_storage_pool_id': 'P0', 'status': 'abnormal', 'storage_type': 'block', 'total_capacity': 1655709892608, 'used_capacity': 1073741824000, 'free_capacity': 581968068608 } ] volume_result = [ { 'name': 'mytest_0000', 'storage_id': '12345', 'description': '', 'status': 'normal', 'native_volume_id': '0000', 'native_storage_pool_id': 'P2', 'wwn': '', 'type': 'thick', 'total_capacity': 322122547200, 'used_capacity': 134217728, 'free_capacity': 321988329472 } ] alert_result = [ { 'alert_id': 'HostPortStateChanged', 'alert_name': 'Host port 10000090FA383E80 state logged in.', 'severity': 'Critical', 'description': 'Host port 10000090FA383E80 state logged in.', 'category': 'Fault', 'type': 'EquipmentAlarm', 'sequence_number': 'SEfe', 'occur_time': 1397970023000, 'resource_type': 'Storage' } ] port_result = [ { 'name': 'U1400.1B1.RJ55380-P1-C1-T0', 'storage_id': '12345', 'native_port_id': 'I0000', 'location': 'U1400.1B1.RJ55380-P1-C1-T0', 'connection_status': 'connected', 'health_status': 'normal', 'type': 'fc', 'logical_type': '', 'speed': 8000000000, 'max_speed': 8000000000, 'wwn': '50:05:07:63:04:00:17:EF' }, { 'name': 'U1400.1B1.RJ55380-P1-C1-T5', 'storage_id': '12345', 'native_port_id': 'I0005', 'location': 'U1400.1B1.RJ55380-P1-C1-T5', 'connection_status': 'connected', 'health_status': 'normal', 'type': 'fc', 'logical_type': '', 'speed': 8000000000, 'max_speed': 8000000000, 'wwn': '50:05:07:63:04:40:57:EF' } ] contrl_result = [ { 'name': '00', 'storage_id': '12345', 'native_controller_id': '00', 'status': 'normal' }, { 'name': 
'01', 'storage_id': '12345', 'native_controller_id': '01', 'status': 'normal' } ]

trap_result = {
    'alert_id': 'ddddddd',
    'alert_name': 'test',
    'severity': 'Critical',
    'category': 'Fault',
    'type': 'EquipmentAlarm',
    'occur_time': 1605852610000,
    'description': 'this is test',
    'resource_type': 'Storage',
    'location': 'eeeeeeeee'
}

GET_INITIATORS = {
    "data": {
        "host_ports": [
            {
                "wwpn": "50050763030813A2",
                "state": "logged in",
                "hosttype": "VMware",
                "addrdiscovery": "lunpolling",
                "lbs": "512",
                "host": {
                    "name": "myhost"
                }
            }
        ]
    }
}

INIT_RESULT = [
    {
        'name': '50050763030813A2',
        'storage_id': '12345',
        'native_storage_host_initiator_id': '50050763030813A2',
        'wwn': '50050763030813A2',
        'status': 'online',
        'type': 'unknown',
        'native_storage_host_id': 'myhost'
    }
]

GET_ALL_HOSTS = {
    "data": {
        "hosts": [
            {
                "name": "test_host",
                "state": "online",
                "hosttype": "VMware",
                "addrmode": "SCSI mask",
                "addrdiscovery": "lunpolling",
                "lbs": "512"
            }
        ]
    }
}

HOST_RESULT = [
    {
        'name': 'test_host',
        'storage_id': '12345',
        'native_storage_host_id': 'test_host',
        'os_type': 'VMware ESX',
        'status': 'normal'
    }
]

GET_HOST_MAPPING = {
    "data": {
        "mappings": [
            {
                "lunid": "00",
                "volume": {
                    "id": "0005"
                }
            }
        ]
    }
}

VIEW_RESULT = [
    {
        'name': '00_test_host',
        'native_storage_host_id': 'test_host',
        'storage_id': '12345',
        'native_volume_id': '0005',
        'native_masking_view_id': '00_test_host'
    }
]


class TestDS8KDriver(TestCase):

    @mock.patch.object(RestHandler, 'get_rest_info')
    def test_get_storage(self, mock_storage):
        RestHandler.login = mock.Mock(return_value=None)
        mock_storage.return_value = GET_STORAGE
        storage = DS8KDriver(**ACCESS_INFO).get_storage(context)
        self.assertDictEqual(storage, storage_result)

    @mock.patch.object(RestHandler, 'get_rest_info')
    def test_list_storage_pools(self, mock_pool):
        RestHandler.login = mock.Mock(return_value=None)
        mock_pool.return_value = GET_ALL_POOLS
        pool = DS8KDriver(**ACCESS_INFO).list_storage_pools(context)
        self.assertEqual(pool, pool_result)

    def test_list_volumes(self):
        RestHandler.login = mock.Mock(return_value=None)
        RestHandler.get_rest_info = mock.Mock(
            side_effect=[GET_ALL_POOLS, GET_ALL_LUNS])
        vol = DS8KDriver(**ACCESS_INFO).list_volumes(context)
        self.assertEqual(vol, volume_result)

    @mock.patch.object(RestHandler, 'get_rest_info')
    def test_list_alerts(self, mock_alert):
        RestHandler.login = mock.Mock(return_value=None)
        mock_alert.return_value = GET_ALL_ALERTS
        alert = DS8KDriver(**ACCESS_INFO).list_alerts(context)
        alert[0]['occur_time'] = alert_result[0]['occur_time']
        self.assertEqual(alert, alert_result)

    @mock.patch.object(RestHandler, 'call_with_token')
    def test_call_and_login(self, mock_token):
        with self.assertRaises(Exception) as exc:
            mock_token.return_value = mock.MagicMock(
                status_code=401, text='Authentication has failed')
            DS8KDriver(**ACCESS_INFO).rest_handler.login()
        self.assertEqual('Invalid username or password.',
                         str(exc.exception))
        RestHandler.login = mock.Mock(return_value=None)
        mock_token.return_value = mock.MagicMock(status_code=401)
        DS8KDriver(**ACCESS_INFO).rest_handler.call('')

    @mock.patch.object(RestHandler, 'get_rest_info')
    def test_list_ports(self, mock_port):
        RestHandler.login = mock.Mock(return_value=None)
        mock_port.return_value = GET_ALL_PORTS
        port = DS8KDriver(**ACCESS_INFO).list_ports(context)
        self.assertEqual(port, port_result)

    @mock.patch.object(RestHandler, 'get_rest_info')
    def test_list_controllers(self, mock_contrl):
        RestHandler.login = mock.Mock(return_value=None)
        mock_contrl.return_value = GET_ALL_CONTROLLERS
        controller = DS8KDriver(**ACCESS_INFO).list_controllers(context)
        self.assertEqual(controller, contrl_result)

    @mock.patch.object(RestHandler, 'get_rest_info')
    def test_host_initiators(self, mock_init):
        RestHandler.login = mock.Mock(return_value=None)
        mock_init.return_value = GET_INITIATORS
        initiators = DS8KDriver(
            **ACCESS_INFO).list_storage_host_initiators(context)
        self.assertEqual(initiators, INIT_RESULT)

    @mock.patch.object(RestHandler, 'get_rest_info')
    def test_hosts(self, mock_host):
        RestHandler.login = mock.Mock(return_value=None)
        mock_host.return_value = GET_ALL_HOSTS
        hosts = DS8KDriver(**ACCESS_INFO).list_storage_hosts(context)
        self.assertEqual(hosts, HOST_RESULT)

    @mock.patch.object(RestHandler, 'get_rest_info')
    def test_masking_views(self, mock_view):
        RestHandler.login = mock.Mock(return_value=None)
        mock_view.side_effect = [GET_ALL_HOSTS, GET_HOST_MAPPING]
        views = DS8KDriver(**ACCESS_INFO).list_masking_views(context)
        self.assertEqual(views, VIEW_RESULT)


================================================
FILE: delfin/tests/unit/drivers/ibm/storwize_svc/__init__.py
================================================


================================================
FILE: delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
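# ---------------------------------------------------------------------------
# Editor's note: the SSH-based driver tests in this file (like the REST-based
# DS8K tests above) all follow one stubbing pattern: patch the transport
# layer (here SSHPool.get / SSHHandler.do_exec) and feed canned CLI output
# through mock's side_effect, one payload per expected call. The sketch below
# is an illustrative, self-contained reduction of that pattern;
# '_sketch_side_effect_pattern' and 'FakeHandler' are hypothetical names and
# are not part of delfin.
def _sketch_side_effect_pattern():
    from unittest import mock

    class FakeHandler:
        def do_exec(self, command):
            raise NotImplementedError  # replaced by the stub below

    handler = FakeHandler()
    # Each do_exec() call consumes the next canned payload, mirroring how the
    # tests pair a "list" command output with a per-object "detail" output.
    handler.do_exec = mock.Mock(side_effect=['id name\n1 pool0\n', 'id 1\n'])
    assert handler.do_exec('lspool') == 'id name\n1 pool0\n'
    assert handler.do_exec('lspool 1') == 'id 1\n'
# ---------------------------------------------------------------------------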
import sys from unittest import TestCase, mock import paramiko from delfin.common import constants try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET from delfin.drivers.utils.tools import Tools sys.modules['delfin.cryptor'] = mock.Mock() from delfin import context from delfin.drivers.ibm.storwize_svc.ssh_handler import SSHHandler from delfin.drivers.ibm.storwize_svc.storwize_svc import StorwizeSVCDriver from delfin.drivers.utils.ssh_client import SSHPool class Request: def __init__(self): self.environ = {'delfin.context': context.RequestContext()} pass UNSECURE_ALGORITHMS = { "ciphers": [ "aes128-cbc", "aes192-cbc", "aes256-cbc", "blowfish-cbc", "3des-cbc" ], "macs": [ "hmac-sha1-96", "hmac-md5", "hmac-md5-96" ], "keys": [ "ecdsa-sha2-nistp256", "ecdsa-sha2-nistp384", "ecdsa-sha2-nistp521", "ssh-dss" ], "kex": [ "diffie-hellman-group14-sha256", "diffie-hellman-group-exchange-sha1", "diffie-hellman-group14-sha1", "diffie-hellman-group1-sha1" ]} ACCESS_INFO = { "storage_id": "12345", "vendor": "hpe", "model": "3par", "rest": { "host": "10.0.0.1", "port": 8443, "username": "user", "password": "pass" }, "ssh": { "host": "110.143.132.231", "port": 22, "username": "user", "password": "pass", "pub_key": "ddddddddddddddddddddddddd" } } system_info = """id 00000200A1207E1F name Cluster_192.168.70.125 location local partnership total_mdisk_capacity 8.1TB space_in_mdisk_grps 8.1TB space_allocated_to_vdisks 5.06TB total_free_space 3.1TB total_vdiskcopy_capacity 5.51TB total_used_capacity 5.05TB total_overallocation 67 total_vdisk_capacity 5.51TB total_allocated_extent_capacity 5.07TB statistics_status on statistics_frequency 5 cluster_locale en_US time_zone 246 Asia/Shanghai code_level 7.4.0.11 (build 103.29.1609070000) console_IP 51.10.58.200:443 id_alias 00000200A1007E1F gm_link_tolerance 300 gm_inter_cluster_delay_simulation 0 gm_intra_cluster_delay_simulation 0 gm_max_host_delay 5 email_reply email_contact email_contact_primary email_contact_alternate email_contact_location email_contact2 email_contact2_primary email_contact2_alternate email_state stopped inventory_mail_interval 0 cluster_ntp_IP_address cluster_isns_IP_address iscsi_auth_method none iscsi_chap_secret auth_service_configured no auth_service_enabled no auth_service_url auth_service_user_name auth_service_pwd_set no auth_service_cert_set no auth_service_type tip relationship_bandwidth_limit 25 tier ssd tier_capacity 0.00MB tier_free_capacity 0.00MB tier enterprise tier_capacity 0.00MB tier_free_capacity 0.00MB tier nearline tier_capacity 8.13TB tier_free_capacity 3.06TB has_nas_key no layer storage rc_buffer_size 48 compression_active no compression_virtual_capacity 0.00MB compression_compressed_capacity 0.00MB compression_uncompressed_capacity 0.00MB cache_prefetch on email_organization email_machine_address email_machine_city email_machine_state XX email_machine_zip email_machine_country total_drive_raw_capacity 10.92TB compression_destage_mode off local_fc_port_mask 1111111111111111111111111111111 partner_fc_port_mask 11111111111111111111111111111 high_temp_mode off topology standard topology_status rc_auth_method none vdisk_protection_time 15 vdisk_protection_enabled no product_name IBM Storwize V7000 max_replication_delay 0 partnership_exclusion_threshold 315 """ enclosure_info = """id:status:type:managed:IO_id:IO_group_name:product_MTM 1:online:control:yes:0:io_grp0:2076-124:78N16G4:2:2:2:2:24:0:0 """ pools_info = """id name status mdisk_count vdisk_count capacity 1 mdiskgrp0 online 
1 101 8.13TB 1024 3.06TB """ pool_info = """id 1 name mdiskgrp0 status online mdisk_count 1 vdisk_count 101 capacity 8.13TB extent_size 1024 free_capacity 3.06TB virtual_capacity 5.51TB used_capacity 5.05TB real_capacity 5.06TB overallocation 67 warning 80 easy_tier auto easy_tier_status balanced tier ssd tier_mdisk_count 0 tier_capacity 0.00MB tier_free_capacity 0.00MB tier enterprise tier_mdisk_count 0 tier_capacity 0.00MB tier_free_capacity 0.00MB tier nearline tier_mdisk_count 1 tier_capacity 8.13TB tier_free_capacity 3.06TB compression_active no compression_virtual_capacity 0.00MB compression_compressed_capacity 0.00MB compression_uncompressed_capacity 0.00MB site_id site_name parent_mdisk_grp_id 1 parent_mdisk_grp_name mdiskgrp0 child_mdisk_grp_count 0 child_mdisk_grp_capacity 0.00MB type parent encrypt no """ volumes_info = """id name IO_group_id IO_group_name status 0 V7000LUN_Mig 0 io_grp0 online 1 """ volume_info = """id:0 name:V7000LUN_Mig IO_group_id:0 IO_group_name:io_grp0 status:online mdisk_grp_id:1 mdisk_grp_name:mdiskgrp0 capacity:50.00GB type:striped formatted:no mdisk_id: mdisk_name: FC_id: FC_name: RC_id: RC_name: vdisk_UID:60050768028401F87C00000000000000 throttling:0 preferred_node_id:3 fast_write_state:empty cache:readwrite udid: fc_map_count:0 sync_rate:50 copy_count:1 se_copy_count:0 filesystem: mirror_write_priority:latency RC_change:no compressed_copy_count:0 access_IO_group_count:1 last_access_time:190531130236 parent_mdisk_grp_id:1 parent_mdisk_grp_name:mdiskgrp0 copy_id:0 status:online sync:yes primary:yes mdisk_grp_id:1 mdisk_grp_name:mdiskgrp0 type:striped mdisk_id: mdisk_name: fast_write_state:empty used_capacity:50.00GB real_capacity:50.00GB free_capacity:0.00MB overallocation:100 autoexpand: warning: grainsize: se_copy:no easy_tier:on easy_tier_status:balanced tier:ssd tier_capacity:0.00MB tier:enterprise tier_capacity:0.00MB tier:nearline tier_capacity:50.00GB compressed_copy:no uncompressed_used_capacity:50.00GB parent_mdisk_grp_id:1 parent_mdisk_grp_name:mdiskgrp0 """ alerts_info = """sequence_number last_timestamp object_type object_id 101 201111165750 node 3 node1 """ alert_info = """sequence_number 101 first_timestamp 201111165750 first_timestamp_epoch 1605085070 last_timestamp 201111165750 last_timestamp_epoch 1605085070 object_type node object_id 3 object_name node1 copy_id reporting_node_id 3 reporting_node_name node1 root_sequence_number event_count 1 status message fixed no auto_fixed no notification_type warning event_id 980221 event_id_text Error log cleared error_code error_code_text machine_type 2076124 serial_number 78N16G4 FRU None fixed_timestamp fixed_timestamp_epoch callhome_type none sense1 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 sense2 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 sense3 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 sense4 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 sense5 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 sense6 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 sense7 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 sense8 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 """ trap_info = { '1.3.6.1.2.1.1.3.0': '0', '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.2.6.190.3', '1.3.6.1.4.1.2.6.190.4.1': '# Machine Type = 2076124', '1.3.6.1.4.1.2.6.190.4.2': '# Serial Number = 78N16G4', '1.3.6.1.4.1.2.6.190.4.3': '# Error ID = 981004 : FC discovery occurred, ' 'no configuration changes were detected', '1.3.6.1.4.1.2.6.190.4.4': '# Error Code = ', '1.3.6.1.4.1.2.6.190.4.5': '# System Version = 7.4.0.11 
(build 103.29.' '1609070000)', '1.3.6.1.4.1.2.6.190.4.6': '# FRU = None ', '1.3.6.1.4.1.2.6.190.4.7': '# System Name = Cluster_192.168.70.125', '1.3.6.1.4.1.2.6.190.4.8': '# Node ID = 3', '1.3.6.1.4.1.2.6.190.4.9': '# Error Sequence Number = 165', '1.3.6.1.4.1.2.6.190.4.10': '# Timestamp = Tue Nov 10 09:08:27 2020', '1.3.6.1.4.1.2.6.190.4.11': '# Object Type = cluster', '1.3.6.1.4.1.2.6.190.4.12': '# Object ID = 0', '1.3.6.1.4.1.2.6.190.4.17': '# Object Name = Cluster_192.168.70.125', '1.3.6.1.4.1.2.6.190.4.15': '# Copy ID = ', '1.3.6.1.4.1.2.6.190.4.16': '# Machine Part Number = ', '1.3.6.1.4.1.2.6.190.4.13': '# Additional Data (0 -> 63) = 01080000018A0', '1.3.6.1.4.1.2.6.190.4.14': '# Additional Data (64 -> 127) = 00000000000', 'transport_address': '51.10.58.200', 'storage_id': '4992d7f5-4f73-4123-a27b-6e27889f3852' } storage_result = { 'name': 'Cluster_192.168.70.125', 'vendor': 'IBM', 'model': 'IBM Storwize V7000', 'status': 'normal', 'serial_number': '00000200A1207E1F', 'firmware_version': '7.4.0.11', 'location': 'local', 'total_capacity': 8961019766374, 'raw_capacity': 8961019766374, 'subscribed_capacity': 0, 'used_capacity': 5552533720268, 'free_capacity': 3408486046105 } pool_result = [ { 'name': 'mdiskgrp0', 'storage_id': '12345', 'native_storage_pool_id': '1', 'description': '', 'status': 'normal', 'storage_type': 'block', 'subscribed_capacity': 6058309069045, 'total_capacity': 8939029533818, 'used_capacity': 5552533720268, 'free_capacity': 3364505580994 } ] volume_result = [ { 'description': '', 'status': 'normal', 'total_capacity': 53687091200, 'used_capacity': 53687091200, 'type': 'thick', 'free_capacity': 0, 'native_volume_id': '0', 'deduplicated': True, 'native_storage_pool_id': '1', 'wwn': '60050768028401F87C00000000000000', 'compressed': False, 'name': 'V7000LUN_Mig', 'storage_id': '12345' } ] alert_result = [ { 'type': 'EquipmentAlarm', 'location': 'node1', 'category': 'Fault', 'occur_time': 1605085070000, 'sequence_number': '101', 'resource_type': 'node', 'alert_name': 'Error log cleared', 'severity': 'warning', 'alert_id': '980221', 'description': 'Error log cleared' } ] trap_alert_result = { 'alert_id': '981004', 'type': 'EquipmentAlarm', 'severity': 'Informational', 'sequence_number': '165', 'description': 'FC discovery occurred, no configuration changes ' 'were detected', 'occur_time': 1604970507000, 'alert_name': 'FC discovery occurred, no configuration changes ' 'were detected', 'resource_type': 'cluster', 'location': 'Cluster_192.168.70.125', 'category': 'Fault' } get_all_controllers = """id name 2 node_165084 """ get_single_controller = """id 2 id 2 name node_165084 UPS_serial_number 100025I194 WWNN 500507680100EF7C status online IO_group_id 0 IO_group_name io_grp0 partner_node_id 4 partner_node_name node1 config_node yes UPS_unique_id 2040000085641244 port_id 500507680140EF7C port_status active port_speed 8Gb port_id 500507680130EF7C port_status active port_speed 8Gb port_id 500507680110EF7C port_status active port_speed 8Gb port_id 500507680120EF7C port_status active port_speed 8Gb hardware CG8 iscsi_name iqn.1986-03.com.ibm:2145.cluster8.44.162.140.node165084 iscsi_alias failover_active no failover_name node1 failover_iscsi_name iqn.1986-03.com.ibm:2145.cluster8.44.162.140.node1 failover_iscsi_alias panel_name 165084 enclosure_id canister_id enclosure_serial_number service_IP_address 8.44.162.142 service_gateway 8.44.128.1 service_subnet_mask 255.255.192.0 service_IP_address_6 service_gateway_6 service_prefix_6 service_IP_mode static service_IP_mode_6 site_id 
site_name identify_LED off product_mtm 2145-CG8 code_level 7.8.1.11 (build 135.9.1912100725000) serial_number 75PVZNA machine_signature 0214-784E-C029-0147 """ get_controller_cpu = """id,2 name,node_165084 status,online IO_group_id,0 IO_group_name,io_grp0 hardware,CG8 actual_different,no actual_valid,yes memory_configured,24 memory_actual,24 memory_valid,yes cpu_count,1 cpu_socket,1 cpu_configured,6 core Intel(R) Xeon(R) CPU E5645 @ 2.40GHz cpu_actual,6 core Intel(R) Xeon(R) CPU E5645 @ 2.40GHz cpu_valid,yes adapter_count,3 adapter_location,1 adapter_configured,Four port 8Gb/s FC adapter adapter_actual,Four port 8Gb/s FC adapter adapter_valid,yes adapter_location,0 adapter_configured,Two port 1Gb/s Ethernet adapter adapter_actual,Two port 1Gb/s Ethernet adapter adapter_valid,yes adapter_location,2 adapter_configured,none adapter_actual,none adapter_valid,yes ports_different,no """ controller_result = [ { 'name': 'node_165084', 'storage_id': '12345', 'native_controller_id': '2', 'status': 'normal', 'soft_version': '7.8.1.11', 'location': 'node_165084', 'cpu_info': '6 core Intel(R) Xeon(R) CPU E5645 @ 2.40GHz', 'cpu_count': 1 } ] get_all_disks = """id name 4 mdisk4 """ get_single_disk = """id 4 name mdisk4 status offline mode managed mdisk_grp_id 1 mdisk_grp_name Pool0_NBE capacity 2.0TB quorum_index block_size 512 controller_name NBEPOC_target_Dorado5000V6 ctrl_type 4 ctrl_WWNN 210030E98EE1914C controller_id 41 path_count 0 max_path_count 0 ctrl_LUN_# 0000000000000001 UID 630e98e100e1914c1aa793ae0000001900000000000000000000000000000000 preferred_WWPN active_WWPN fast_write_state empty raid_status raid_level redundancy strip_size spare_goal spare_protection_min balanced tier tier0_flash slow_write_priority fabric_type fc site_id site_name easy_tier_load medium encrypt no distributed no drive_class_id drive_count 0 stripe_width 0 rebuild_areas_total rebuild_areas_available rebuild_areas_goal dedupe no preferred_iscsi_port_id active_iscsi_port_id replacement_date """ disk_result = [ { 'name': 'mdisk4', 'storage_id': '12345', 'native_disk_id': '4', 'capacity': 2199023255552, 'status': 'offline', 'physical_type': 'fc', 'native_disk_group_id': 'Pool0_NBE', 'location': 'NBEPOC_target_Dorado5000V6_mdisk4' } ] get_all_fcports = """id fc_io_port_id 0 1 """ get_single_fcport = """id 0 fc_io_port_id 1 port_id 1 type fc port_speed 8Gb node_id 1 node_name node1 WWPN 500507680140EF3E nportid 850600 status active switch_WWPN 200650EB1A8A59B8 fpma N/A vlanid N/A fcf_MAC N/A attachment switch cluster_use local_partner adapter_location 1 adapter_port_id 1 fabric_WWN 100050EB1A8A59B8 """ get_iscsiport_1 = """id 1 node_id 1 node_name node1 IP_address mask gateway IP_address_6 prefix_6 gateway_6 MAC 34:40:b5:d7:5a:94 duplex Full state unconfigured speed 1Gb/s failover no mtu 1500 link_state active host remote_copy 0 host_6 remote_copy_6 0 remote_copy_status remote_copy_status_6 vlan vlan_6 adapter_location 0 adapter_port_id 1 dcbx_state lossless_iscsi lossless_iscsi6 iscsi_priority_tag fcoe_priority_tag pfc_enabled_tags pfc_disabled_tags priority_group_0 priority_group_1 priority_group_2 priority_group_3 priority_group_4 priority_group_5 priority_group_6 priority_group_7 bandwidth_allocation storage storage_6 id 1 node_id 1 node_name node1 IP_address mask gateway IP_address_6 prefix_6 gateway_6 MAC 34:40:b5:d7:5a:94 duplex Full state unconfigured speed 1Gb/s failover yes mtu 1500 link_state active host remote_copy 0 host_6 remote_copy_6 0 remote_copy_status remote_copy_status_6 vlan vlan_6 adapter_location 0 
adapter_port_id 1 dcbx_state lossless_iscsi lossless_iscsi6 iscsi_priority_tag fcoe_priority_tag pfc_enabled_tags pfc_disabled_tags priority_group_0 priority_group_1 priority_group_2 priority_group_3 priority_group_4 priority_group_5 priority_group_6 priority_group_7 bandwidth_allocation storage storage_6 id 1 node_id 2 node_name node_165084 IP_address mask gateway IP_address_6 prefix_6 gateway_6 MAC 34:40:b5:d4:0c:f0 duplex Full state unconfigured speed 1Gb/s failover no mtu 1500 link_state active host remote_copy 0 host_6 remote_copy_6 0 remote_copy_status remote_copy_status_6 vlan vlan_6 adapter_location 0 adapter_port_id 1 dcbx_state lossless_iscsi lossless_iscsi6 iscsi_priority_tag fcoe_priority_tag pfc_enabled_tags pfc_disabled_tags priority_group_0 priority_group_1 priority_group_2 priority_group_3 priority_group_4 priority_group_5 priority_group_6 priority_group_7 bandwidth_allocation storage storage_6 id 1 node_id 2 node_name node_165084 IP_address mask gateway IP_address_6 prefix_6 gateway_6 MAC 34:40:b5:d4:0c:f0 duplex Full state unconfigured speed 1Gb/s failover yes mtu 1500 link_state active host remote_copy 0 host_6 remote_copy_6 0 remote_copy_status remote_copy_status_6 vlan vlan_6 adapter_location 0 adapter_port_id 1 dcbx_state lossless_iscsi lossless_iscsi6 iscsi_priority_tag fcoe_priority_tag pfc_enabled_tags pfc_disabled_tags priority_group_0 priority_group_1 priority_group_2 priority_group_3 priority_group_4 priority_group_5 priority_group_6 priority_group_7 bandwidth_allocation storage storage_6 """ get_iscsiport_2 = """id 2 node_id 1 node_name node1 IP_address mask gateway IP_address_6 prefix_6 gateway_6 MAC 34:40:b5:d7:5a:94 duplex Full state unconfigured speed 1Gb/s failover no mtu 1500 link_state active host remote_copy 0 host_6 remote_copy_6 0 remote_copy_status remote_copy_status_6 vlan vlan_6 adapter_location 0 adapter_port_id 1 dcbx_state lossless_iscsi lossless_iscsi6 iscsi_priority_tag fcoe_priority_tag pfc_enabled_tags pfc_disabled_tags priority_group_0 priority_group_1 priority_group_2 priority_group_3 priority_group_4 priority_group_5 priority_group_6 priority_group_7 bandwidth_allocation storage storage_6 id 2 node_id 1 node_name node1 IP_address mask gateway IP_address_6 prefix_6 gateway_6 MAC 34:40:b5:d7:5a:94 duplex Full state unconfigured speed 1Gb/s failover yes mtu 1500 link_state active host remote_copy 0 host_6 remote_copy_6 0 remote_copy_status remote_copy_status_6 vlan vlan_6 adapter_location 0 adapter_port_id 1 dcbx_state lossless_iscsi lossless_iscsi6 iscsi_priority_tag fcoe_priority_tag pfc_enabled_tags pfc_disabled_tags priority_group_0 priority_group_1 priority_group_2 priority_group_3 priority_group_4 priority_group_5 priority_group_6 priority_group_7 bandwidth_allocation storage storage_6 id 2 node_id 2 node_name node_165084 IP_address mask gateway IP_address_6 prefix_6 gateway_6 MAC 34:40:b5:d4:0c:f0 duplex Full state unconfigured speed 1Gb/s failover no mtu 1500 link_state active host remote_copy 0 host_6 remote_copy_6 0 remote_copy_status remote_copy_status_6 vlan vlan_6 adapter_location 0 adapter_port_id 1 dcbx_state lossless_iscsi lossless_iscsi6 iscsi_priority_tag fcoe_priority_tag pfc_enabled_tags pfc_disabled_tags priority_group_0 priority_group_1 priority_group_2 priority_group_3 priority_group_4 priority_group_5 priority_group_6 priority_group_7 bandwidth_allocation storage storage_6 id 2 node_id 2 node_name node_165084 IP_address mask gateway IP_address_6 prefix_6 gateway_6 MAC 34:40:b5:d4:0c:f0 duplex Full state unconfigured 
speed 1Gb/s failover yes mtu 1500 link_state active host remote_copy 0 host_6 remote_copy_6 0 remote_copy_status remote_copy_status_6 vlan vlan_6 adapter_location 0 adapter_port_id 1 dcbx_state lossless_iscsi lossless_iscsi6 iscsi_priority_tag fcoe_priority_tag pfc_enabled_tags pfc_disabled_tags priority_group_0 priority_group_1 priority_group_2 priority_group_3 priority_group_4 priority_group_5 priority_group_6 priority_group_7 bandwidth_allocation storage storage_6 """ get_file_list = 'id filename\n' \ '1 Nn_stats_78N16G4-2_211201_161110\n' \ '2 Nn_stats_78N16G4-2_211201_161210\n' \ '3 Nm_stats_78N16G4-2_211201_161110\n' \ '4 Nm_stats_78N16G4-2_211201_161210\n' \ '5 Nv_stats_78N16G4-2_211201_161110\n' \ '6 Nv_stats_78N16G4-2_211201_161210' file_nv_1611 = """ """ file_nv_1612 = """ """ file_nm_1611 = """ """ file_nm_1612 = """ """ file_nn_1611 = """ """ file_nn_1612 = """ """ file_nn_node_1611 = """ """ file_nn_node_1612 = """ """ resource_metrics = { 'volume': [ 'iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime', 'ioSize', 'readIoSize', 'writeIoSize', ], 'port': [ 'iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime' ], 'disk': [ 'iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime' ], 'controller': [ 'iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime' ] } port_result = [ { 'name': 'node1_0', 'storage_id': '12345', 'native_port_id': '0', 'location': 'node1_0', 'connection_status': 'connected', 'health_status': 'normal', 'type': 'fc', 'speed': 8000000000, 'native_parent_id': 'node1', 'wwn': '500507680140EF3E' }, { 'name': 'node1_1', 'storage_id': '12345', 'native_port_id': 'node1_1', 'location': 'node1_1', 'connection_status': 'connected', 'health_status': 'abnormal', 'type': 'eth', 'speed': 1000000000, 'native_parent_id': 'node1', 'mac_address': '34:40:b5:d7:5a:94', 'ipv4': '', 'ipv4_mask': '', 'ipv6': '' }, { 'name': 'node_165084_1', 'storage_id': '12345', 'native_port_id': 'node_165084_1', 'location': 'node_165084_1', 'connection_status': 'connected', 'health_status': 'abnormal', 'type': 'eth', 'speed': 1000000000, 'native_parent_id': 'node_165084', 'mac_address': '34:40:b5:d4:0c:f0', 'ipv4': '', 'ipv4_mask': '', 'ipv6': '' }, { 'name': 'node1_2', 'storage_id': '12345', 'native_port_id': 'node1_2', 'location': 'node1_2', 'connection_status': 'connected', 'health_status': 'abnormal', 'type': 'eth', 'speed': 1000000000, 'native_parent_id': 'node1', 'mac_address': '34:40:b5:d7:5a:94', 'ipv4': '', 'ipv4_mask': '', 'ipv6': '' }, { 'name': 'node_165084_2', 'storage_id': '12345', 'native_port_id': 'node_165084_2', 'location': 'node_165084_2', 'connection_status': 'connected', 'health_status': 'abnormal', 'type': 'eth', 'speed': 1000000000, 'native_parent_id': 'node_165084', 'mac_address': '34:40:b5:d4:0c:f0', 'ipv4': '', 'ipv4_mask': '', 'ipv6': '' } ] perf_get_port_fc = [ { 'name': '0', 'storage_id': '12345', 'native_port_id': '0', 'location': 'node1_0', 'connection_status': 'connected', 'health_status': 'normal', 'type': 'fc', 'max_speed': 8589934592, 'native_parent_id': 'node1', 'wwn': '0x50050768021065cb' } ] metrics_result = [ constants.metric_struct( name='iops', labels={ 'storage_id': '12345', 'resource_type': 'volume', 'resource_id': '0', 'resource_name': 'powerha', 'type': 'RAW', 'unit': 'IOPS' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='readIops', labels={ 'storage_id': '12345', 
'resource_type': 'volume', 'resource_id': '0', 'resource_name': 'powerha', 'type': 'RAW', 'unit': 'IOPS' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='writeIops', labels={ 'storage_id': '12345', 'resource_type': 'volume', 'resource_id': '0', 'resource_name': 'powerha', 'type': 'RAW', 'unit': 'IOPS' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='throughput', labels={ 'storage_id': '12345', 'resource_type': 'volume', 'resource_id': '0', 'resource_name': 'powerha', 'type': 'RAW', 'unit': 'MB/s' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='readThroughput', labels={ 'storage_id': '12345', 'resource_type': 'volume', 'resource_id': '0', 'resource_name': 'powerha', 'type': 'RAW', 'unit': 'MB/s' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='writeThroughput', labels={ 'storage_id': '12345', 'resource_type': 'volume', 'resource_id': '0', 'resource_name': 'powerha', 'type': 'RAW', 'unit': 'MB/s' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='responseTime', labels={ 'storage_id': '12345', 'resource_type': 'volume', 'resource_id': '0', 'resource_name': 'powerha', 'type': 'RAW', 'unit': 'ms' }, values={ 1638346330000: 0 }), constants.metric_struct(name='ioSize', labels={ 'storage_id': '12345', 'resource_type': 'volume', 'resource_id': '0', 'resource_name': 'powerha', 'type': 'RAW', 'unit': 'KB' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='readIoSize', labels={ 'storage_id': '12345', 'resource_type': 'volume', 'resource_id': '0', 'resource_name': 'powerha', 'type': 'RAW', 'unit': 'KB' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='writeIoSize', labels={ 'storage_id': '12345', 'resource_type': 'volume', 'resource_id': '0', 'resource_name': 'powerha', 'type': 'RAW', 'unit': 'KB' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='iops', labels={ 'storage_id': '12345', 'resource_type': 'disk', 'resource_id': '0', 'resource_name': 'mdisk1', 'type': 'RAW', 'unit': 'IOPS' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='readIops', labels={ 'storage_id': '12345', 'resource_type': 'disk', 'resource_id': '0', 'resource_name': 'mdisk1', 'type': 'RAW', 'unit': 'IOPS' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='writeIops', labels={ 'storage_id': '12345', 'resource_type': 'disk', 'resource_id': '0', 'resource_name': 'mdisk1', 'type': 'RAW', 'unit': 'IOPS' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='throughput', labels={ 'storage_id': '12345', 'resource_type': 'disk', 'resource_id': '0', 'resource_name': 'mdisk1', 'type': 'RAW', 'unit': 'MB/s' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='readThroughput', labels={ 'storage_id': '12345', 'resource_type': 'disk', 'resource_id': '0', 'resource_name': 'mdisk1', 'type': 'RAW', 'unit': 'MB/s' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='writeThroughput', labels={ 'storage_id': '12345', 'resource_type': 'disk', 'resource_id': '0', 'resource_name': 'mdisk1', 'type': 'RAW', 'unit': 'MB/s' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='responseTime', labels={ 'storage_id': '12345', 'resource_type': 'disk', 'resource_id': '0', 'resource_name': 'mdisk1', 'type': 'RAW', 'unit': 'ms' }, values={ 1638346330000: 0 }), constants.metric_struct(name='iops', labels={ 'storage_id': '12345', 'resource_type': 'port', 'resource_id': '0', 'resource_name': '0', 'type': 'RAW', 'unit': 'IOPS' }, values={ 1638346330000: 0.0 }), 
constants.metric_struct(name='readIops', labels={ 'storage_id': '12345', 'resource_type': 'port', 'resource_id': '0', 'resource_name': '0', 'type': 'RAW', 'unit': 'IOPS' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='writeIops', labels={ 'storage_id': '12345', 'resource_type': 'port', 'resource_id': '0', 'resource_name': '0', 'type': 'RAW', 'unit': 'IOPS' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='throughput', labels={ 'storage_id': '12345', 'resource_type': 'port', 'resource_id': '0', 'resource_name': '0', 'type': 'RAW', 'unit': 'MB/s' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='readThroughput', labels={ 'storage_id': '12345', 'resource_type': 'port', 'resource_id': '0', 'resource_name': '0', 'type': 'RAW', 'unit': 'MB/s' }, values={ 1638346330000: 0.0 }), constants.metric_struct(name='writeThroughput', labels={ 'storage_id': '12345', 'resource_type': 'port', 'resource_id': '0', 'resource_name': '0', 'type': 'RAW', 'unit': 'MB/s' }, values={ 1638346330000: 0.0 })] get_all_hosts = """id name 1 host1 """ get_host_summery = """id 38 name tjy_test_iscsi port_count 3 type generic mask 11111111111111111111111111111111111111 iogrp_count 4 status online site_id site_name host_cluster_id host_cluster_name WWPN 21000024FF543B0C node_logged_in_count 1 state inactive WWPN 21000024FF438098 node_logged_in_count 1 state active WWPN 21000024FF41C461 node_logged_in_count 1 state inactive """ host_result = [ { 'name': 'tjy_test_iscsi', 'storage_id': '12345', 'native_storage_host_id': '38', 'os_type': 'Unknown', 'status': 'normal' } ] get_all_views = """id name SCSI_id vdisk_id vdisk_name 2 Solaris11.3_57 0 27 PG_1 6 hwstorage_8.44.133.80 0 24 wyktest 7 VNX-WIN8-TEST 0 31 SVC-WIN8_test 14 pd_esx6 0 65 pd_taiping0 14 pd_esx6 1 66 pd_taiping1 14 pd_esx6 2 67 pd_taiping2 """ view_result = [ { 'name': '2_27', 'native_storage_host_id': '2', 'storage_id': '12345', 'native_volume_id': '27', 'native_masking_view_id': '2_27' }, { 'name': '6_24', 'native_storage_host_id': '6', 'storage_id': '12345', 'native_volume_id': '24', 'native_masking_view_id': '6_24' }, { 'name': '7_31', 'native_storage_host_id': '7', 'storage_id': '12345', 'native_volume_id': '31', 'native_masking_view_id': '7_31' }, { 'name': '14_65', 'native_storage_host_id': '14', 'storage_id': '12345', 'native_volume_id': '65', 'native_masking_view_id': '14_65' }, { 'name': '14_66', 'native_storage_host_id': '14', 'storage_id': '12345', 'native_volume_id': '66', 'native_masking_view_id': '14_66' }, { 'name': '14_67', 'native_storage_host_id': '14', 'storage_id': '12345', 'native_volume_id': '67', 'native_masking_view_id': '14_67' } ] init_result = [ { 'name': '21000024FF543B0C', 'storage_id': '12345', 'native_storage_host_initiator_id': '21000024FF543B0C', 'wwn': '21000024FF543B0C', 'status': 'online', 'type': 'fc', 'native_storage_host_id': '38' }, { 'name': '21000024FF438098', 'storage_id': '12345', 'native_storage_host_initiator_id': '21000024FF438098', 'wwn': '21000024FF438098', 'status': 'online', 'type': 'fc', 'native_storage_host_id': '38' }, { 'name': '21000024FF41C461', 'storage_id': '12345', 'native_storage_host_initiator_id': '21000024FF41C461', 'wwn': '21000024FF41C461', 'status': 'online', 'type': 'fc', 'native_storage_host_id': '38' } ] def create_driver(): SSHHandler.login = mock.Mock( return_value={""}) return StorwizeSVCDriver(**ACCESS_INFO) class TestStorwizeSvcStorageDriver(TestCase): driver = create_driver() def test_init(self): SSHHandler.login = mock.Mock( 
return_value={""}) StorwizeSVCDriver(**ACCESS_INFO) def test_list_storage(self): SSHPool.get = mock.Mock( return_value={paramiko.SSHClient()}) SSHHandler.do_exec = mock.Mock( side_effect=[system_info]) storage = self.driver.get_storage(context) self.assertDictEqual(storage, storage_result) def test_list_storage_pools(self): SSHPool.get = mock.Mock( return_value={paramiko.SSHClient()}) SSHHandler.do_exec = mock.Mock( side_effect=[pools_info, pool_info]) pool = self.driver.list_storage_pools(context) self.assertDictEqual(pool[0], pool_result[0]) def test_list_volumes(self): SSHPool.get = mock.Mock( return_value={paramiko.SSHClient()}) SSHHandler.do_exec = mock.Mock( side_effect=[volumes_info, volume_info]) volume = self.driver.list_volumes(context) self.assertDictEqual(volume[0], volume_result[0]) def test_list_alerts(self): query_para = { "begin_time": 1605085070000, "end_time": 1605085070000 } SSHPool.get = mock.Mock( return_value={paramiko.SSHClient()}) SSHHandler.do_exec = mock.Mock( side_effect=[alerts_info, alert_info]) alert = self.driver.list_alerts(context, query_para) self.assertEqual(alert[0].get('alert_id'), alert_result[0].get('alert_id')) def test_parse_alert(self): alert = self.driver.parse_alert(context, trap_info) trap_alert_result['occur_time'] = alert['occur_time'] self.assertEqual(alert, trap_alert_result) def test_clear_alert(self): alert_id = 101 SSHPool.get = mock.Mock( return_value={paramiko.SSHClient()}) SSHHandler.do_exec = mock.Mock( side_effect=['CMMVC8275E']) self.driver.clear_alert(context, alert_id) with self.assertRaises(Exception) as exc: SSHPool.get = mock.Mock( return_value={paramiko.SSHClient()}) SSHHandler.do_exec = mock.Mock( side_effect=['can not find alert']) self.driver.clear_alert(context, alert_id) self.assertIn('The results are invalid. 
can not find alert', str(exc.exception)) @mock.patch.object(SSHHandler, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_list_controllers(self, mock_ssh_get, mock_control): mock_ssh_get.return_value = {paramiko.SSHClient()} mock_control.side_effect = [get_all_controllers, get_single_controller, get_controller_cpu] controller = self.driver.list_controllers(context) self.assertEqual(controller, controller_result) @mock.patch.object(SSHHandler, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_list_disks(self, mock_ssh_get, mock_disk): mock_ssh_get.return_value = {paramiko.SSHClient()} mock_disk.side_effect = [get_all_disks, get_single_disk] disk = self.driver.list_disks(context) self.assertEqual(disk, disk_result) @mock.patch.object(SSHHandler, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_list_ports(self, mock_ssh_get, mock_port): mock_ssh_get.return_value = {paramiko.SSHClient()} mock_port.side_effect = [get_all_fcports, get_single_fcport, get_iscsiport_1, get_iscsiport_2] port = self.driver.list_ports(context) self.assertEqual(port, port_result) @mock.patch.object(SSHHandler, 'get_fc_port') @mock.patch.object(Tools, 'get_remote_file_to_xml') @mock.patch.object(SSHHandler, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_collect_perf_metrics(self, mock_ssh_get, mock_file_list, mock_get_file, mock_fc_port): start_time = 1637346270000 end_time = 1639346330000 storage_id = '12345' mock_ssh_get.return_value = {paramiko.SSHClient()} mock_file_list.return_value = get_file_list mock_get_file.return_value = [ET.fromstring(file_nv_1611), ET.fromstring(file_nv_1612), ET.fromstring(file_nm_1611), ET.fromstring(file_nm_1612), ET.fromstring(file_nn_1611), ET.fromstring(file_nn_1612), ET.fromstring(file_nn_node_1611), ET.fromstring(file_nn_node_1612) ] mock_fc_port.return_value = perf_get_port_fc metrics = self.driver.collect_perf_metrics(context, storage_id, resource_metrics, start_time, end_time) self.assertEqual(metrics[0][1]['resource_name'], 'powerha') @mock.patch.object(SSHHandler, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_list_hosts(self, mock_ssh_get, mock_host): mock_ssh_get.return_value = {paramiko.SSHClient()} mock_host.side_effect = [get_all_hosts, get_host_summery] host = self.driver.list_storage_hosts(context) self.assertEqual(host, host_result) @mock.patch.object(SSHHandler, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_masking_views(self, mock_ssh_get, mock_view): mock_ssh_get.return_value = {paramiko.SSHClient()} mock_view.return_value = get_all_views view = self.driver.list_masking_views(context) self.assertEqual(view, view_result) @mock.patch.object(SSHHandler, 'do_exec') @mock.patch.object(SSHPool, 'get') def test_list_host_initiators(self, mock_ssh_get, mock_host): mock_ssh_get.return_value = {paramiko.SSHClient()} mock_host.side_effect = [get_all_hosts, get_host_summery] init = self.driver.list_storage_host_initiators(context) self.assertEqual(init, init_result) ================================================ FILE: delfin/tests/unit/drivers/macro_san/__init__.py ================================================ ================================================ FILE: delfin/tests/unit/drivers/macro_san/ms/__init__.py ================================================ ================================================ FILE: delfin/tests/unit/drivers/macro_san/ms/test_ms_stor.py ================================================ # Copyright 2022 The SODA Authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from unittest import TestCase, mock import paramiko import six from paramiko import SSHClient sys.modules['delfin.cryptor'] = mock.Mock() import time from oslo_utils import units from delfin.common import constants from delfin.drivers.macro_san.ms import consts from delfin.drivers.macro_san.ms.macro_ssh_client import MacroSanSSHPool from oslo_log import log from delfin import context from delfin.drivers.macro_san.ms.ms_handler import MsHandler from delfin.drivers.macro_san.ms.ms_stor import MacroSanDriver LOG = log.getLogger(__name__) ACCESS_INFO = { "storage_id": "12345", "vendor": "macro_san", "model": "macro_san", "ssh": { "host": "110.143.133.200", "port": 22, "username": "admin", "password": "admin" } } POOLS_INFO = """Last login: Wed Jul 13 15:05:45 2022 from 192.168.3.235\r (null)@(null) ODSP CLI> pool mgt getlist\r Storage Pools Sum: 4\r \r Name: SYS-Pool\r Type: Traditional\r Is Foreign: No\r Is Reserved: Yes\r Cell Size: 1GB\r All Capacity: 7144GB\r Used Capacity: 961GB\r Used Capacity Rate: 13.5%\r Free Capacity(RAID): 6183GB\r Free Capacity(HDD RAID): 0GB\r Free Capacity(SSD RAID): 6183GB\r \r Name: pool-1\r Type: Traditional\r Is Foreign: No\r Is Reserved: No\r Cell Size: 1GB\r All Capacity: 0GB\r Used Capacity: 0GB\r Used Capacity Rate: 0.0%\r Free Capacity(RAID): 0GB\r Free Capacity(HDD RAID): 0GB\r Free Capacity(SSD RAID): 0GB\r \r Command completed successfully.\r (null)@(null) ODSP CLI>""" RAID_SYS_POOL = """(null)@(null) ODSP CLI> raid mgt getlist -p SYS-Pool\r RAIDs Sum: 1\r \r Name: SYS-RAID\r RAID Level: RAID5\r Health Status: Normal\r Total Capacity: 7144GB\r Free Capacity: 6183GB\r Disk Type: SSD\r Data Disks Sum: 8\r Dedicated Spare Disks Sum: 1\r \r Command completed successfully.\r (null)@(null) ODSP CLI>""" RAID_POOL_1 = """(null)@(null) ODSP CLI> raid mgt getlist -p pool-1\r RAIDs Sum: 0\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ POOLS_DATA = [{'name': 'SYS-Pool', 'storage_id': '12345', 'native_storage_pool_id': 'SYS-Pool', 'status': 'normal', 'storage_type': 'block', 'total_capacity': 7670811590656.0, 'used_capacity': 1031865892864.0, 'free_capacity': 6638945697792.0}, {'name': 'pool-1', 'storage_id': '12345', 'native_storage_pool_id': 'pool-1', 'status': 'unknown', 'storage_type': 'block', 'total_capacity': 0.0, 'used_capacity': 0.0, 'free_capacity': 0.0}] VOLUME_INFO = """(null)@(null) ODSP CLI> lun mgt getlist -p SYS-Pool\r SYS-Pool: 18 LUNs (18 Normal 0 Faulty)\r \r Name : SYS-LUN-Config\r LUN id : 0\r Total Size : 4GB\r Current Owner(SP) : SP1\r Health Status : Normal\r Cache Status : Disable\r Mapped to Client : No\r \r \r Name : SYS-LUN-Log\r LUN id : 1\r Total Size : 4GB\r Current Owner(SP) : SP1\r Health Status : Normal\r Cache Status : Disable\r Mapped to Client : No\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ VOLUME_QUERY_ONE = """(null)@(null) ODSP CLI> lun mgt query -n SYS-LUN-Config\r Name : SYS-LUN-Config\r Device ID: 600B342F1B0F9ABD7BABD272BD0000DA\r 
Total Size : 4GB\r Current Owner(SP) : SP1\r Owner(Pool) : SYS-Pool\r Health Status : Normal\r Is Reserved : Yes\r Is Foreign : No\r Created Time: 2021/12/23 11:26:40\r Cache Set Status: Disable\r Cache Status: Disable\r LUN Distr Mode : concatenated\r Mapped to Client : No\r Command completed successfully.\r (null)@(null) ODSP CLI> """ VOLUME_QUERY_TWO = """(null)@(null) ODSP CLI> lun mgt query -n SYS-LUN-Log\r Name : SYS-LUN-Log\r Device ID: 600B342EF209582D8D07D1EE4D0000DA\r Total Size : 4GB\r Current Owner(SP) : SP1\r Owner(Pool) : SYS-Pool\r Health Status : Normal\r Is Reserved : Yes\r Is Foreign : No\r Created Time: 2021/12/23 11:26:44\r Cache Set Status: Disable\r Cache Status: Disable\r LUN Distr Mode : concatenated\r Mapped to Client : No\r Command completed successfully.\r (null)@(null) ODSP CLI>""" VOLUME_ONE_NEW = """(null)@(null) ODSP CLI> lun mgt query -n SYS-LUN-Config\r Name: SYS-LUN-Config\r WWN: 600B342F1B0F9ABD7BABD272BD0000DA\r Type: Standard-LUN\r Is RDV LUN: No\r Total Logical Size: 4GB (209715200sector)\r Total Physical Size: 4GB (209715200sector)\r Thin-Provisioning: Disable\r Default Owner(SP): SP1\r Current Owner(SP): SP1\r Owner(Group): N/A\r Owner(Pool): SYS-Pool\r Health Status: Normal\r Ua_type: ALUA\r Is Reserved: No\r Is Foreign: No\r Write Zero Status: Disable\r Created Time: 2020/03/02 17:49:15\r Read Cache: Enable\r Read Cache Status: Enable\r Write Cache: Enable\r Write Cache Status: Enable\r Mapped to Client: No\r LUN UUID: 0x50b34200-154800ee-a8746477-234b74a7\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ VOLUME_TWO_NEW = """(null)@(null) ODSP CLI> lun mgt query -n SYS-LUN-Log\r Name: SYS-LUN-Log\r WWN: 600B3423899AC1EDB125DCAE6D4E00D0\r NGUID: 040F09004EE6CA2500B342B11EAC9938\r Type: Standard-LUN\r Is RDV LUN: No\r Total Logical Size: 1GB (2097152sector)\r Total Physical Size: 1GB (2097152sector)\r Thin-Provisioning: Enable\r Thin-LUN Extent Size: 16KB\r Thin-LUN Private-area Allocate Mode: SSD RAID First\r Thin-LUN Data-area Allocate Mode: HDD RAID First\r Thin-LUN Expand Threshold: 30GB\r Thin-LUN Expand Step Size: 50GB\r Thin-LUN Allocated Physical Capacity: 1GB\r Thin-LUN Allocated Physical Capacity Percentage: 100.0%\r Thin-LUN Used Capacity: 3956KB\r Thin-LUN Used Capacity Percentage: 0.0%\r Thin-LUN Unused Capacity: 1,048,576KB\r Thin-LUN Unused Capacity Percentage: 100.0%\r Thin-LUN Distribute Mode: Single\r Thin-LUN Dedup Switch: Disable\r Thin-LUN Compress Switch: Disable\r Default Owner(SP): SP1\r Current Owner(SP): SP1\r Owner(Group): N/A\r Owner(Pool): Pool-1\r Health Status: Normal\r Ua_type: ALUA\r Is Reserved: No\r Is Foreign: No\r Created Time: 2022/08/29 17:36:37\r Read Cache: Enable\r Read Cache Status: Enable\r Write Cache: Enable\r Write Cache Status: Enable\r Mapped to Client: No\r LUN UUID: 0x00b34204-0f09004e-e6ca25b1-1eac9938\r Thin-LUN private UUID: 0x00b34204-0f09006f-6c27276c-a6d3f14b\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ VOLUME_TWO_INFO = """(null)@(null) ODSP CLI> lun mgt getlist -p pool-1\r pool-1: 0 LUNs (0 Normal 0 Faulty)\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ VOLUMES_DATA = [ {'name': 'SYS-LUN-Config', 'storage_id': '12345', 'status': 'normal', 'native_volume_id': 'SYS-LUN-Config', 'native_storage_pool_id': 'SYS-Pool', 'type': 'thick', 'wwn': '600B342F1B0F9ABD7BABD272BD0000DA', 'total_capacity': 4294967296.0, 'used_capacity': 4294967296.0, 'free_capacity': 0.0}, {'name': 'SYS-LUN-Log', 'storage_id': '12345', 'status': 'normal', 
'native_volume_id': 'SYS-LUN-Log', 'native_storage_pool_id': 'Pool-1', 'type': 'thin', 'wwn': '600B3423899AC1EDB125DCAE6D4E00D0', 'total_capacity': 1073741824.0, 'used_capacity': 4050944.0, 'free_capacity': 1069690880.0}] THICK_VOLUMES_DATA = [ {'name': 'SYS-LUN-Config', 'storage_id': '12345', 'status': 'normal', 'native_volume_id': 'SYS-LUN-Config', 'native_storage_pool_id': 'SYS-Pool', 'type': 'thick', 'wwn': '600B342F1B0F9ABD7BABD272BD0000DA', 'total_capacity': 4294967296.0, 'used_capacity': 4294967296.0, 'free_capacity': 0.0}, {'name': 'SYS-LUN-Log', 'storage_id': '12345', 'status': 'normal', 'native_volume_id': 'SYS-LUN-Log', 'native_storage_pool_id': 'SYS-Pool', 'type': 'thick', 'wwn': '600B342EF209582D8D07D1EE4D0000DA', 'total_capacity': 4294967296.0, 'used_capacity': 4294967296.0, 'free_capacity': 0.0}] VERSION_INFO = """(null)@(null) ODSP CLI> system mgt getversion\r [SP1 Version]\r SP1 ODSP_MSC Version: V2.0.14T04\r SP1 ODSP_Driver Version: V607\r \r [SP2 Version]\r SP2 ODSP_MSC Version: V2.0.14T04\r SP2 ODSP_Driver Version: V607\r \r [SP3 Version]\r SP3 ODSP_MSC Version: N/A\r SP3 ODSP_Driver Version: N/A\r \r [SP4 Version]\r SP4 ODSP_MSC Version: N/A\r SP4 ODSP_Driver Version: N/A\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ CPU_INFO = """(null)@(null) ODSP CLI> system mgt getcpuinfo\r [SP1 CPU Information]\r SP1 Processor0 ID: 0\r SP1 Processor0 Vendor_id: GenuineIntel\r SP1 Processor0 CPU Frequency: 2200.000 MHz\r SP1 Processor1 ID: 1\r SP1 Processor1 Vendor_id: GenuineIntel\r SP1 Processor1 CPU Frequency: 2200.000 MHz\r SP1 Processor2 ID: 2\r SP1 Processor2 Vendor_id: GenuineIntel\r SP1 Processor2 CPU Frequency: 2200.000 MHz\r SP1 Processor3 ID: 3\r SP1 Processor3 Vendor_id: GenuineIntel\r SP1 Processor3 CPU Frequency: 2200.000 MHz\r \r [SP2 CPU Information]\r SP2 Processor0 ID: 0\r SP2 Processor0 Vendor_id: GenuineIntel\r SP2 Processor0 CPU Frequency: 2200.000 MHz\r SP2 Processor1 ID: 1\r SP2 Processor1 Vendor_id: GenuineIntel\r SP2 Processor1 CPU Frequency: 2200.000 MHz\r SP2 Processor2 ID: 2\r SP2 Processor2 Vendor_id: GenuineIntel\r SP2 Processor2 CPU Frequency: 2200.000 MHz\r SP2 Processor3 ID: 3\r SP2 Processor3 Vendor_id: GenuineIntel\r SP2 Processor3 CPU Frequency: 2200.000 MHz\r \r Command completed successfully.\r (null)@(null) ODSP CLI>""" HA_STATUS = """(null)@(null) ODSP CLI> ha mgt getstatus\r SP1 HA Running Status : dual--single\r SP2 HA Running Status : dual--single\r SP3 HA Running Status : absent--poweroff\r SP4 HA Running Status : absent--poweroff\r \r Command completed successfully.\r (null)@(null) ODSP CLI>""" HA_STATUS_NEW = """(null)@(null) ODSP CLI> ha mgt getstatus\r System HA Status : normal\r SP1 HA Running Status : single\r SP2 HA Running Status : single\r \r Command completed successfully.\r (null)@(null) ODSP CLI>""" CONTROLLERS_DATA = [ {'name': 'SP1', 'storage_id': '12345', 'native_controller_id': 'SP1', 'status': 'normal', 'location': 'SP1', 'soft_version': 'V2.0.14T04', 'cpu_info': 'GenuineIntel@2200.000MHz', 'cpu_count': 1}, {'name': 'SP2', 'storage_id': '12345', 'native_controller_id': 'SP2', 'status': 'normal', 'location': 'SP2', 'soft_version': 'V2.0.14T04', 'cpu_info': 'GenuineIntel@2200.000MHz', 'cpu_count': 1}, {'name': 'SP3', 'storage_id': '12345', 'native_controller_id': 'SP3', 'status': 'offline', 'location': 'SP3', 'soft_version': 'N/A', 'cpu_info': ''}, {'name': 'SP4', 'storage_id': '12345', 'native_controller_id': 'SP4', 'status': 'offline', 'location': 'SP4', 'soft_version': 'N/A', 'cpu_info': ''}] 
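
# CONTROLLERS_DATA above stitches together three fixtures: soft_version
# comes from VERSION_INFO, cpu_info from CPU_INFO, and status from
# HA_STATUS, where an SP reporting 'absent--poweroff' is offline while a
# running SP ('dual--single' / 'single') is normal. A minimal sketch of
# that status mapping (illustrative only: the real logic lives in
# MsHandler/consts, and this helper name is hypothetical):


def _sp_status_from_ha(ha_running_status):
    """Map an `ha mgt getstatus` running-status string to a controller
    status, per the fixtures above."""
    if 'absent' in ha_running_status or 'poweroff' in ha_running_status:
        return 'offline'  # e.g. 'absent--poweroff' -> SP3/SP4 above
    return 'normal'       # e.g. 'dual--single', 'single' -> SP1/SP2
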
DSU_INFO = """(null)@(null) ODSP CLI> dsu mgt getlist\r DSUs Sum:1\r \r Name: DSU-7:1:1\r Disks: 2\r DSU EP1 SAS address: 500b342000dd26ff\r DSU EP2 SAS address: 500b342000dd273f\r \r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ DISKS_INFO = """(null)@(null) ODSP CLI> disk mgt getlist -d 7:1:1\r Disks Sum: 2\r \r Name: Disk-7:1:1:1\r Type: SSD\r Capacity: 893GB\r Vendor: ATA\r RPMs: 0\r Health Status: Normal\r Disk Role: Data disk\r Owner(Pool): SYS-Pool\r Owner(RAID): SYS-RAID\r \r Name: Disk-7:1:1:2\r Type: SSD\r Capacity: 893GB\r Vendor: ATA\r RPMs: 0\r Health Status: Normal\r Disk Role: Data disk\r Owner(Pool): SYS-Pool\r Owner(RAID): SYS-RAID\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ DISK_ONE = """(null)@(null) ODSP CLI> disk mgt query -d 7:1:1:1\r Name: Disk-7:1:1:1\r Type: HDD\r Capacity: 893GB\r Vendor: ATA\r Model: Micron_5200_MTFDDAK960TDD\r FW Version: U004\r Serial Number: 18311E8D2787\r Size: 2.5inch\r RPMs: 0\r Read Cache Setting: Enable\r Write Cache Setting: Enable\r Health Status: Normal\r Role: Data disk\r Owner(Pool): SYS-Pool\r Owner(RAID): SYS-RAID\r Locating Status: NO\r SP1 Disk Online Status: Online\r SP2 Disk Online Status: Online\r SP3 Disk Online Status: Online\r SP4 Disk Online Status: Online\r SSD Estimated Life Remaining: N/A\r SSD Estimated Time Remaining: N/A\r SSD Applicable Scene: N/A\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ DISKS_TWO = """(null)@(null) ODSP CLI> disk mgt query -d 7:1:1:2\r Name: Disk-7:1:1:2\r Type: SSD\r Capacity: 893GB\r Vendor: ATA\r Model: Micron_5200_MTFDDAK960TDD\r FW Version: U004\r Serial Number: 18311E8D2C03\r Size: 2.5inch\r RPMs: 0\r Read Cache Setting: Enable\r Write Cache Setting: Enable\r Health Status: Normal\r Role: Data disk\r Owner(Pool): SYS-Pool\r Owner(RAID): SYS-RAID\r Locating Status: NO\r SP1 Disk Online Status: Online\r SP2 Disk Online Status: Online\r SP3 Disk Online Status: Online\r SP4 Disk Online Status: Online\r SSD Estimated Life Remaining: N/A\r SSD Estimated Time Remaining: N/A\r SSD Applicable Scene: N/A\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ DISKS_DATA = [{'name': 'Disk-7:1:1:1', 'storage_id': '12345', 'native_disk_id': 'Disk-7:1:1:1', 'serial_number': '18311E8D2787', 'manufacturer': 'ATA', 'model': 'Micron_5200_MTFDDAK960TDD', 'firmware': 'U004', 'location': 'Disk-7:1:1:1', 'speed': 0, 'capacity': 958851448832.0, 'status': 'normal', 'physical_type': 'hdd', 'logical_type': 'data'}, {'name': 'Disk-7:1:1:2', 'storage_id': '12345', 'native_disk_id': 'Disk-7:1:1:2', 'serial_number': '18311E8D2C03', 'manufacturer': 'ATA', 'model': 'Micron_5200_MTFDDAK960TDD', 'firmware': 'U004', 'location': 'Disk-7:1:1:2', 'speed': 0, 'capacity': 958851448832.0, 'status': 'normal', 'physical_type': 'ssd', 'logical_type': 'data'}] FC_INFO = """(null)@(null) ODSP CLI> client target queryportlist\r fc port-1:4:1\r wwn : 50:0b:34:20:02:fe:b5:0d\r online state : 2\r actual speed : 0\r port topology : 0\r initiator num : 0\r fc port-1:4:2\r wwn : 50:0b:34:20:02:fe:b5:0e\r online state : 2\r actual speed : 0\r port topology : 0\r initiator num : 0\r fc port-1:4:3\r wwn : 50:0b:34:20:02:fe:b5:0f\r online state : 2\r actual speed : 0\r port topology : 0\r initiator num : 0\r fc port-1:4:4\r wwn : 50:0b:34:20:02:fe:b5:10\r online state : 2\r actual speed : 0\r port topology : 0\r initiator num : 0\r fc port-2:4:1\r wwn : 50:0b:34:20:02:fe:b3:0d\r online state : 2\r actual speed : 0\r port topology : 0\r initiator num : 0\r fc port-2:4:2\r 
wwn : 50:0b:34:20:02:fe:b3:0e\r online state : 2\r actual speed : 0\r port topology : 0\r initiator num : 0\r fc port-2:4:3\r wwn : 50:0b:34:20:02:fe:b3:0f\r online state : 2\r actual speed : 0\r port topology : 0\r initiator num : 0\r fc port-2:4:4\r wwn : 50:0b:34:20:02:fe:b3:10\r online state : 2\r actual speed : 0\r port topology : 0\r initiator num : 0\r Command completed successfully.\r (null)@(null) ODSP CLI> """ SAS_INFO = """(null)@(null) ODSP CLI>system sas getportlist -c 1:1\r SAS Controller 1:1 Ports Sum:2\r \r SAS-1:1:1 Link Status: Full-Linkup\r SAS-1:1:1 PHY Max Speed: 12Gbps\r SAS-1:1:1 PHY1 Speed: 12Gbps\r SAS-1:1:1 PHY2 Speed: 12Gbps\r SAS-1:1:1 PHY3 Speed: 12Gbps\r SAS-1:1:1 PHY4 Speed: 12Gbps\r \r SAS-1:1:2 Link Status: Full-Linkup\r SAS-1:1:2 PHY Max Speed: 12Gbps\r SAS-1:1:2 PHY1 Speed: 6Gbps\r SAS-1:1:2 PHY2 Speed: 6Gbps\r SAS-1:1:2 PHY3 Speed: 6Gbps\r SAS-1:1:2 PHY4 Speed: 6Gbps\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ PORT_DATA = [{'native_port_id': 'FC-1:4:1', 'name': 'FC-1:4:1', 'type': 'fc', 'logical_type': 'physical', 'connection_status': 'disconnected', 'health_status': 'unknown', 'location': 'FC-1:4:1', 'storage_id': '12345', 'native_parent_id': 'SP1', 'speed': 0.0, 'wwn': '50:0b:34:20:02:fe:b5:0d'}, {'native_port_id': 'FC-1:4:2', 'name': 'FC-1:4:2', 'type': 'fc', 'logical_type': 'physical', 'connection_status': 'disconnected', 'health_status': 'unknown', 'location': 'FC-1:4:2', 'storage_id': '12345', 'native_parent_id': 'SP1', 'speed': 0.0, 'wwn': '50:0b:34:20:02:fe:b5:0e'}, {'native_port_id': 'FC-1:4:3', 'name': 'FC-1:4:3', 'type': 'fc', 'logical_type': 'physical', 'connection_status': 'disconnected', 'health_status': 'unknown', 'location': 'FC-1:4:3', 'storage_id': '12345', 'native_parent_id': 'SP1', 'speed': 0.0, 'wwn': '50:0b:34:20:02:fe:b5:0f'}, {'native_port_id': 'FC-1:4:4', 'name': 'FC-1:4:4', 'type': 'fc', 'logical_type': 'physical', 'connection_status': 'disconnected', 'health_status': 'unknown', 'location': 'FC-1:4:4', 'storage_id': '12345', 'native_parent_id': 'SP1', 'speed': 0.0, 'wwn': '50:0b:34:20:02:fe:b5:10'}, {'native_port_id': 'FC-2:4:1', 'name': 'FC-2:4:1', 'type': 'fc', 'logical_type': 'physical', 'connection_status': 'disconnected', 'health_status': 'unknown', 'location': 'FC-2:4:1', 'storage_id': '12345', 'native_parent_id': 'SP2', 'speed': 0.0, 'wwn': '50:0b:34:20:02:fe:b3:0d'}, {'native_port_id': 'FC-2:4:2', 'name': 'FC-2:4:2', 'type': 'fc', 'logical_type': 'physical', 'connection_status': 'disconnected', 'health_status': 'unknown', 'location': 'FC-2:4:2', 'storage_id': '12345', 'native_parent_id': 'SP2', 'speed': 0.0, 'wwn': '50:0b:34:20:02:fe:b3:0e'}, {'native_port_id': 'FC-2:4:3', 'name': 'FC-2:4:3', 'type': 'fc', 'logical_type': 'physical', 'connection_status': 'disconnected', 'health_status': 'unknown', 'location': 'FC-2:4:3', 'storage_id': '12345', 'native_parent_id': 'SP2', 'speed': 0.0, 'wwn': '50:0b:34:20:02:fe:b3:0f'}, {'native_port_id': 'FC-2:4:4', 'name': 'FC-2:4:4', 'type': 'fc', 'logical_type': 'physical', 'connection_status': 'disconnected', 'health_status': 'unknown', 'location': 'FC-2:4:4', 'storage_id': '12345', 'native_parent_id': 'SP2', 'speed': 0.0, 'wwn': '50:0b:34:20:02:fe:b3:10'}, {'native_port_id': 'SAS-1:1:1', 'name': 'SAS-1:1:1', 'type': 'sas', 'logical_type': 'physical', 'connection_status': 'connected', 'health_status': 'unknown', 'location': 'SAS-1:1:1', 'storage_id': '12345', 'native_parent_id': 'SP1', 'max_speed': 12000000000, 'speed': 12000000000}, {'native_port_id': 
'SAS-1:1:2', 'name': 'SAS-1:1:2', 'type': 'sas', 'logical_type': 'physical', 'connection_status': 'connected', 'health_status': 'unknown', 'location': 'SAS-1:1:2', 'storage_id': '12345', 'native_parent_id': 'SP1', 'max_speed': 12000000000, 'speed': 6000000000}] PARSE_ALERT_INFO = { '1.3.6.1.2.1.1.3.0': '2995472', '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.35904.1.3.3', '1.3.6.1.2.1.25.1.2': '2022-07-12 17:43:40', '1.3.6.1.4.1.35904.1.2.1.1': 'Storage-1', '1.3.6.1.4.1.35904.1.2.1.4.1': 'Battery_expired', '1.3.6.1.4.1.35904.1.2.1.4.2': 'SP1', '1.3.6.1.4.1.35904.1.2.1.4.3': "SSU-7:1:1's battery '2' becomes expired," " please prepare a new module and replace" " it as soon as possible.", '1.3.6.1.4.1.35904.1.2.1.4.4': '2', 'transport_address': '192.168.3.235', 'storage_id': '05e007e4-62ef-4e24-a14e-57a8ee8e5bf3'} PARSE_ALERT_DATA = { 'alert_id': '2995472', 'severity': 'Major', 'category': 'Fault', 'occur_time': 1657619020000, 'description': "SSU-7:1:1's battery '2' becomes expired, please prepare" " a new module and replace it as soon as possible.", 'location': 'Storage-1:SP1', 'type': 'EquipmentAlarm', 'resource_type': 'Storage', 'alert_name': '电池模块超期', 'match_key': 'ec62c3cdd862da9b0f8da6d03d97d76e'} INITIATOR_INFO = """(null)@(null) ODSP CLI> client initiator getlist -t all\r Initiators Sum: 3\r Initiator Alias: VMWare\r Initiator WWN: 20:18:f8:2e:3f:f9:85:54\r Type: FC\r OS: AIX\r IP Address Used in Last iSCSI Login Session: N/A\r Mapped Client: Client-1\r Mapped Targets Sum: 2\r Mapped LUNs Sum: 6\r \r Initiator Alias: ds\r Initiator WWN: 20:ab:30:48:56:01:fc:31\r Type: FC\r OS: Other\r IP Address Used in Last iSCSI Login Session: N/A\r Mapped Client: Client-2\r Mapped Targets Sum: 1\r Mapped LUNs Sum: 1\r \r Initiator Alias: dc\r Initiator WWN: 42:25:dc:35:ab:69:12:cb\r Type: FC\r OS: HP_UNIX\r IP Address Used in Last iSCSI Login Session: N/A\r Mapped Client: Client-2\r Mapped Targets Sum: 1\r Mapped LUNs Sum: 2\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ INITIATOR_DATA = [ {'native_storage_host_initiator_id': '20:18:f8:2e:3f:f9:85:54', 'native_storage_host_id': 'Client-1', 'name': '20:18:f8:2e:3f:f9:85:54', 'alias': 'VMWare', 'type': 'fc', 'status': 'unknown', 'wwn': '20:18:f8:2e:3f:f9:85:54', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '20:ab:30:48:56:01:fc:31', 'native_storage_host_id': 'Client-2', 'name': '20:ab:30:48:56:01:fc:31', 'alias': 'ds', 'type': 'fc', 'status': 'unknown', 'wwn': '20:ab:30:48:56:01:fc:31', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '42:25:dc:35:ab:69:12:cb', 'native_storage_host_id': 'Client-2', 'name': '42:25:dc:35:ab:69:12:cb', 'alias': 'dc', 'type': 'fc', 'status': 'unknown', 'wwn': '42:25:dc:35:ab:69:12:cb', 'storage_id': '12345'}] UNKNOWN_COMMAND = """(null)@(null) ODSP CLI> client host gethostlist % Unknown command. 
(null)@(null) ODSP CLI> """ HOSTS_INFO = """(null)@(null) ODSP CLI> client mgt getclientlist\r Clients Sum: 7\r \r Name: Client-1\r Description: ds mss\r Mapped Initiators Num: 1\r \r Name: Client-2\r Description: \r Mapped Initiators Num: 2\r \r Name: Client-3\r Description: sss\r Mapped Initiators Num: 0\r \r Name: Client-4\r Description: dsd\r Mapped Initiators Num: 0\r \r Name: Client-5\r Description: ds\r Mapped Initiators Num: 0\r \r Name: Client-6\r Description: \r Mapped Initiators Num: 0\r \r Name: 5\r Description: \r Mapped Initiators Num: 0\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ HOST_INFO_NEW = """(null)@(null) ODSP CLI> client host gethostlist\r Host Sum: 1\r \r Host Name: Host-1\r OS: Windows2008\r IP Address: 192.168.1.20\r Description: Server 1\r Location: Room-201\r Initiators Sum: 4\r iSCSI Initiators Sum: 2\r FC Initiators Sum: 2\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ HOST_DATA = [{'name': 'Client-1', 'storage_id': '12345', 'native_storage_host_id': 'Client-1', 'os_type': 'AIX', 'status': 'normal', 'description': 'ds mss'}, {'name': 'Client-2', 'storage_id': '12345', 'native_storage_host_id': 'Client-2', 'os_type': 'HP-UX', 'status': 'normal', 'description': ''}, {'name': 'Client-3', 'storage_id': '12345', 'native_storage_host_id': 'Client-3', 'os_type': 'Unknown', 'status': 'normal', 'description': 'sss'}, {'name': 'Client-4', 'storage_id': '12345', 'native_storage_host_id': 'Client-4', 'os_type': 'Unknown', 'status': 'normal', 'description': 'dsd'}, {'name': 'Client-5', 'storage_id': '12345', 'native_storage_host_id': 'Client-5', 'os_type': 'Unknown', 'status': 'normal', 'description': 'ds'}, {'name': 'Client-6', 'storage_id': '12345', 'native_storage_host_id': 'Client-6', 'os_type': 'Unknown', 'status': 'normal', 'description': ''}, {'name': '5', 'storage_id': '12345', 'native_storage_host_id': '5', 'os_type': 'Unknown', 'status': 'normal', 'description': ''}] HOST_DATA_NEW = [{'name': 'Host-1', 'storage_id': '12345', 'native_storage_host_id': 'Host-1', 'os_type': 'Windows', 'status': 'normal', 'description': 'Server 1', 'ip_address': '192.168.1.20'}] HOST_GROUPS_INFO = """(null)@(null) ODSP CLI> client hostgroup gethglist\r Host Groups Sum: 1\r \r Host Group Name: Host-Group-1\r Description: Host Group\r Hosts Sum: 1\r Initiators Sum: 4\r iSCSI Initiators Sum: 2\r FC Initiators Sum: 2\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ HOST_GROUPS_H_INFO = """(null)@(null) ODSP CLI> client hostgroup gethostlist\ -n Host-Group-1\r Hosts Sum: 1\r \r HostName: Host-1\r OS: Windows2008\r IP Address: 192.168.1.20\r Description: Server1\r Location: Room-201\r Initiators Sum: 4\r iSCSI Initiators Sum: 2\r FC Initiators Sum: 2\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ HOST_GROUPS_DATA = { 'storage_host_groups': [ {'name': 'Host-Group-1', 'storage_id': '12345', 'native_storage_host_group_id': 'Host-Group-1', 'description': 'Host Group'} ], 'storage_host_grp_host_rels': [ {'storage_id': '12345', 'native_storage_host_group_id': 'Host-Group-1', 'native_storage_host_id': 'Host-1'} ] } VOLUME_GROUPS_INFO = """(null)@(null) ODSP CLI> client lungroup getlglist\r LUN Group Sum: 1\r \r LUN Group Name: LUN-Group-1\r Description: LUN Group description\r LUNs Sum: 4\r Local LUNs Sum: 4\r Remote LUNs Sum: 0\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ VOLUME_GROUPS_N_INFO = """(null)@(null) ODSP CLI> client lungroup getlunlist\ -n LUN-Group-1\r LUNs Sum: 1\r \r LUN Name: 
LUN-0001/N/A\r Location: Local/Remote\r LUN Capacity: 10GB (20971520sector)/N/A\r LUN WWN: 600B34249837CEBDC611DCB12DD500D6/N/A\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ VOLUME_GROUP_DATA = {'volume_groups': [ {'name': 'LUN-Group-1', 'storage_id': '12345', 'native_volume_group_id': 'LUN-Group-1', 'description': 'LUN Group description'}], 'vol_grp_vol_rels': [ {'storage_id': '12345', 'native_volume_group_id': 'LUN-Group-1', 'native_volume_id': 'LUN-0001/N/A'}]} VIEWS_ONE = """(null)@(null) ODSP CLI> client mgt getsharelunlist -n Client-1\r LUNs Sum: 6\r \r LUN Name: Test_Lun-1\r LUN Capacity: 10GB\r LUN WWN: 600B3427C77BBDFD2FF0DBA82D0000DB\r LUN ID: 0\r Access Mode: Read-Write\r Thin-Provisioning: Disable\r \r LUN Name: Test_Lun-2\r LUN Capacity: 10GB\r LUN WWN: 600B342A316B328D7035DD724D0000DB\r LUN ID: 1\r Access Mode: Read-Write\r Thin-Provisioning: Disable\r \r LUN Name: Test_Lun-3\r LUN Capacity: 10GB\r LUN WWN: 600B342AB2FE2ACDBC63D8B0DD0000DB\r LUN ID: 2\r Access Mode: Read-Write\r Thin-Provisioning: Disable\r \r LUN Name: Test_Lun-4\r LUN Capacity: 10GB\r LUN WWN: 600B342B328A722D55F7DEF5DD0000DB\r LUN ID: 3\r Access Mode: Read-Write\r Thin-Provisioning: Disable\r \r LUN Name: Test_Lun-5\r LUN Capacity: 10GB\r LUN WWN: 600B34221067D72D65DFD18C8D0000DB\r LUN ID: 4\r Access Mode: Read-Write\r Thin-Provisioning: Disable\r \r LUN Name: LUN-1\r LUN Capacity: 2GB\r LUN WWN: 600B342A816A4F2D9098DB015D0000DB\r LUN ID: 5\r Access Mode: Read-Write\r Thin-Provisioning: Disable\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ VIEW_TWO = """(null)@(null) ODSP CLI> client mgt getsharelunlist -n Client-2\r LUNs Sum: 0\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ VIEWS_DATA = [{'native_masking_view_id': 'Client-10', 'name': 'Client-10', 'native_storage_host_id': 'Client-1', 'native_volume_id': '0', 'storage_id': '12345'}, {'native_masking_view_id': 'Client-11', 'name': 'Client-11', 'native_storage_host_id': 'Client-1', 'native_volume_id': '1', 'storage_id': '12345'}, {'native_masking_view_id': 'Client-12', 'name': 'Client-12', 'native_storage_host_id': 'Client-1', 'native_volume_id': '2', 'storage_id': '12345'}, {'native_masking_view_id': 'Client-13', 'name': 'Client-13', 'native_storage_host_id': 'Client-1', 'native_volume_id': '3', 'storage_id': '12345'}, {'native_masking_view_id': 'Client-14', 'name': 'Client-14', 'native_storage_host_id': 'Client-1', 'native_volume_id': '4', 'storage_id': '12345'}, {'native_masking_view_id': 'Client-15', 'name': 'Client-15', 'native_storage_host_id': 'Client-1', 'native_volume_id': '5', 'storage_id': '12345'}] VIEW_NEW_INFO = """client mapview getlist\r Mapviews Sum: 1\r \r Mapview Name: Mapview-1\r Description: Map view\r Host Group Name: Host-Group-1\r Target Group Name: Target-Group-1\r LUN Group Name: LUN-Group-1\r \r Command completed successfully.\r (null)@(null) ODSP CLI> """ VIEWS_NEW_DATA = [{'native_masking_view_id': 'Mapview-1', 'name': 'Mapview-1', 'native_storage_host_group_id': 'Host-Group-1', 'native_volume_group_id': 'LUN-Group-1', 'description': 'Map view', 'storage_id': '12345'}] SYSTEM_QUERY = """(null)@(null) ODSP CLI> system mgt query\r system mgt query\r Device UUID:0x00b34202-fea90000-fa41e0d6-ded905a8\r Command completed successfully.\r (null)@(null) ODSP CLI> """ SYSTEM_QUERY_TWO = """(null)@(null) ODSP CLI> system mgt query\r Device UUID:0x50b34200-0b750056-42ab74ff-6265d80e\r Device Name:Storage-1\r Command completed successfully.\r (null)@(null) ODSP CLI> """ 
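
# STORAGE_DATA / STORAGE_TWO_DATA below follow directly from the fixtures
# above: serial_number is the SSH host joined with the Device UUID from
# `system mgt query`, the name falls back to that UUID when no `Device
# Name` is reported, and every capacity is a binary GB-to-byte
# conversion. A quick worked check of those values (assumed arithmetic,
# not driver code):

GIB = 1024 ** 3
assert 7144 * GIB == 7670811590656      # 'All Capacity: 7144GB' (SYS-Pool)
assert 961 * GIB == 1031865892864       # 'Used Capacity: 961GB'
assert 2 * 893 * GIB == 1917702897664   # raw capacity: two 893GB disks
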
STORAGE_DATA = { 'name': '0x00b34202-fea90000-fa41e0d6-ded905a8', 'vendor': 'MacroSAN', 'status': 'normal', 'serial_number': '110.143.133.200:0x00b34202-fea90000-fa41e0d6-ded905a8', 'firmware_version': 'V2.0.14T04', 'raw_capacity': 1917702897664.0, 'total_capacity': 7670811590656.0, 'used_capacity': 1031865892864.0, 'free_capacity': 6638945697792.0, 'model': '' } STORAGE_TWO_DATA = { 'name': 'Storage-1', 'vendor': 'MacroSAN', 'status': 'normal', 'serial_number': '110.143.133.200:0x50b34200-0b750056-42ab74ff-6265d80e', 'firmware_version': 'V2.0.14T04', 'raw_capacity': 1917702897664.0, 'total_capacity': 7670811590656.0, 'used_capacity': 1031865892864.0, 'free_capacity': 6638945697792.0, 'model': '' } TIMESTAMP = """[root@00-b3-42-04-0f-09 ~]# date +%s\r 1662345266\r [root@00-b3-42-04-0f-09 ~]#""" VERSION_SHOW = """[root@00-b3-42-04-0f-09 ~]# versionshow\r \r SP2 Version:\r ODSP_MSC: V1.5.12T03\r ODSP_DRIVER: V230T03\r BIOS : V166\r BMC : V272P001\r MCPLD : V104\r MPCB : VER.B\r BCB1 : V214\r BCB2 : V214\r BAT1HW : BAT1111A\r BAT2HW : FAN2021A\r IOC1PCB :\r IOC2PCB :\r DSU : 1:1:1\r ODSP_JMC : V221\r ODSP_JMCB: N/A\r EPCB : N/A\r ECPLD : V101\r BAT0_BCB : N/A\r BAT1_BCB : N/A\r \r [root@00-b3-42-04-0f-09 ~]#""" GET_FILE_LIST = """(null)@(null) ODSP CLI> system performance getfilelist\r Performance Statistics Files Sum:2\r SP Name: SP2\r Object Type: DEVICE\r Object Name: Device\r Object Identification: N/A\r File Name: perf_device_SP2_20220920181959.csv\r File Create Time: 2022-09-20 18:19:59\r File Size: 58 KB\r \r SP Name: SP2\r Object Type: SAS PORT\r Object Name: SAS-2:1:1\r Object Identification: N/A\r File Name: perf_sasport_SAS-2_1_1_SP2_20220920181959.csv\r File Create Time: 2022-09-20 18:19:59\r File Size: 56 KB\r \r Command completed successfully.\r (null)@(null) ODSP CLI>""" resource_metrics = { constants.ResourceType.STORAGE: consts.STORAGE_CAP, constants.ResourceType.VOLUME: consts.VOLUME_CAP, constants.ResourceType.PORT: consts.PORT_CAP } def create_driver(): MsHandler.login = mock.Mock( return_value={None}) return MacroSanDriver(**ACCESS_INFO) class test_macro_san_driver(TestCase): driver = create_driver() def test_init(self): MsHandler.login = mock.Mock( return_value={""}) MacroSanDriver(**ACCESS_INFO) def test_get_storage(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[SYSTEM_QUERY, VERSION_INFO, POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1, DSU_INFO, DISKS_INFO, DISK_ONE, DISKS_TWO, HA_STATUS, VERSION_INFO, CPU_INFO, HA_STATUS, VERSION_SHOW]) MacroSanSSHPool.create = mock.Mock(__class__) SSHClient.open_sftp = mock.Mock(__class__) storage_object = self.driver.get_storage(context) self.assertDictEqual(storage_object, STORAGE_DATA) def test_get_storage_new(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[SYSTEM_QUERY_TWO, VERSION_INFO, POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1, DSU_INFO, DISKS_INFO, DISK_ONE, DISKS_TWO, HA_STATUS_NEW, VERSION_INFO, CPU_INFO, HA_STATUS_NEW, VERSION_SHOW]) MacroSanSSHPool.create = mock.Mock(__class__) SSHClient.open_sftp = mock.Mock(__class__) storage_object = self.driver.get_storage(context) self.assertDictEqual(storage_object, STORAGE_TWO_DATA) def test_list_storage_pools(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1]) pools = self.driver.list_storage_pools(context) 
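        # POOLS_DATA mirrors POOLS_INFO with the GB figures converted to
        # bytes (e.g. 7144GB -> 7670811590656); pool-1 reports 'RAIDs
        # Sum: 0' in RAID_POOL_1, which is presumably why its status is
        # 'unknown' rather than 'normal'.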
self.assertListEqual(pools, POOLS_DATA) def test_list_volumes(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1, VOLUME_INFO, VOLUME_QUERY_ONE, VOLUME_QUERY_TWO, VOLUME_TWO_INFO]) volumes = self.driver.list_volumes(context) self.assertListEqual(volumes, THICK_VOLUMES_DATA) def test_list_volumes_new(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1, VOLUME_INFO, VOLUME_ONE_NEW, VOLUME_TWO_NEW, VOLUME_TWO_INFO]) volumes = self.driver.list_volumes(context) self.assertListEqual(volumes, VOLUMES_DATA) def test_list_controllers(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[VERSION_INFO, CPU_INFO, HA_STATUS]) controllers = self.driver.list_controllers(context) self.assertListEqual(controllers, CONTROLLERS_DATA) def test_list_disks(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[DSU_INFO, DISKS_INFO, DISK_ONE, DISKS_TWO]) disks = self.driver.list_disks(context) self.assertListEqual(disks, DISKS_DATA) def test_list_ports(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[FC_INFO, HA_STATUS, DSU_INFO, SAS_INFO, None, None, None]) ports = self.driver.list_ports(context) self.assertListEqual(ports, PORT_DATA) def test_parse_alert(self): parse_alert = self.driver.parse_alert(context, PARSE_ALERT_INFO) PARSE_ALERT_DATA['occur_time'] = parse_alert.get('occur_time') self.assertDictEqual(parse_alert, PARSE_ALERT_DATA) def test_list_storage_host_initiators(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[INITIATOR_INFO]) initiators = self.driver.list_storage_host_initiators(context) self.assertListEqual(initiators, INITIATOR_DATA) def test_list_storage_hosts_old(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[consts.UNKNOWN_COMMAND_TAG, INITIATOR_INFO, HOSTS_INFO]) hosts = self.driver.list_storage_hosts(context) self.assertListEqual(hosts, HOST_DATA) def test_list_storage_hosts_new(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[HOST_INFO_NEW]) hosts = self.driver.list_storage_hosts(context) self.assertListEqual(hosts, HOST_DATA_NEW) def test_list_storage_hosts_group(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[HOST_GROUPS_INFO, HOST_GROUPS_H_INFO]) host_groups = self.driver.list_storage_host_groups(context) self.assertDictEqual(host_groups, HOST_GROUPS_DATA) def test_list_volume_groups(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[VOLUME_GROUPS_INFO, VOLUME_GROUPS_N_INFO]) volume_groups = self.driver.list_volume_groups(context) self.assertDictEqual(volume_groups, VOLUME_GROUP_DATA) def test_list_masking_views_old(self): MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) MacroSanSSHPool.do_exec_shell = mock.Mock( side_effect=[consts.UNKNOWN_COMMAND_TAG, HOSTS_INFO, VIEWS_ONE, VIEW_TWO, 
                         VIEW_TWO, VIEW_TWO, VIEW_TWO, VIEW_TWO,
                         VIEW_TWO])
        views = self.driver.list_masking_views(context)
        self.assertListEqual(views, VIEWS_DATA)

    def test_list_masking_views_new(self):
        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})
        MacroSanSSHPool.do_exec_shell = mock.Mock(
            side_effect=[VIEW_NEW_INFO])
        views = self.driver.list_masking_views(context)
        self.assertListEqual(views, VIEWS_NEW_DATA)

    def test_list_alert(self):
        block = False
        try:
            self.driver.list_alerts(context)
        except Exception as e:
            LOG.error(six.text_type(e))
            block = True
        self.assertEqual(block, True)

    def test_get_latest_perf_timestamp(self):
        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})
        MacroSanSSHPool.do_exec_shell = mock.Mock(
            side_effect=[TIMESTAMP])
        timestamp = self.driver.get_latest_perf_timestamp(context)
        times = 1662345240000
        self.assertEqual(timestamp, times)

    def test_get_capabilities(self):
        capabilities = self.driver.get_capabilities(context)
        metrics = {
            'is_historic': True,
            'resource_metrics': {
                constants.ResourceType.STORAGE: consts.STORAGE_CAP,
                constants.ResourceType.VOLUME: consts.VOLUME_CAP,
                constants.ResourceType.PORT: consts.PORT_CAP,
                constants.ResourceType.DISK: consts.DISK_CAP,
            }
        }
        self.assertDictEqual(capabilities, metrics)

    def test_collect_perf_metrics(self):
        MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})
        MacroSanSSHPool.do_exec_shell = mock.Mock(
            side_effect=[VERSION_SHOW, GET_FILE_LIST])
        MsHandler.down_perf_file = mock.Mock(return_value='')
        localtime = time.mktime(time.localtime()) * units.k
        storage_id = 12345
        start_time = localtime - 1000 * 60 * 5
        end_time = localtime
        metrics = self.driver.collect_perf_metrics(
            context, storage_id, resource_metrics, start_time, end_time)
        self.assertListEqual(metrics, [])


================================================
FILE: delfin/tests/unit/drivers/netapp/__init__.py
================================================


================================================
FILE: delfin/tests/unit/drivers/netapp/netapp_ontap/__init__.py
================================================


================================================
FILE: delfin/tests/unit/drivers/netapp/netapp_ontap/test_constans.py
================================================
# Copyright 2021 The SODA Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
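
# Shared constants for the NetApp ONTAP driver unit tests: an ACCESS_INFO
# block plus mocked ONTAP CLI transcripts and expected parse results. The
# fixture strings keep the raw '\r' terminators exactly as the SSH
# channel returns them.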
ACCESS_INFO = { "storage_id": "12345", "vendor": "hpe", "model": "3par", "ssh": { "host": "192.168.159.130", "port": 22, "username": "admin", "password": "aq114477", }, "rest": { "host": "192.168.159.130", "port": 22, "username": "admin", "password": "cGFzc3dvcmQ=", }, } SYSTEM_INFO = """ ----cluster----\r last login time : 12 456 789\r \r Cluster UUID: 47096983-8018-11eb-bd5b-000c293284bd\r Cluster Name: cl\r Cluster Serial Number: -\r Cluster Location:\r Cluster Contact: \r""" AGGREGATE_INFO = """----cluster----\r last login time : 12 456 789\r \r Aggregate Size Available Used% State #Vols Nodes RAID Status\r --------- -------- --------- ----- ------- ------ --------------------\r aggr0 855MB 42.14MB 95% online 1 cl-01 raid_dp,\r normal\r aggr1 8.79GB 3.98GB 55% online 3 cl-01 raid_dp,\r normal\r aggr2 8.79GB 4.98GB 43% online 3 cl-01 raid_dp,\r normal\r""" VERSION = """----cluster----\r last login time : 12 456 789\r \r NetApp Release 9.8: Fri Aug 19 06:39:33 UTC 2016\r """ SYSTEM_STATUS = """----cluster----\r last login time : 12 456 789\r \r Status\r ---------------\r ok""" DISK_INFO = """----cluster----\r last login time : 12 456 789\r \r Usable Disk Container Container\r Disk Size Shelf Bay Type Type Name Owner\r ---------------- ---------- ----- --- ------- ----------- --------- -----\r NET-1.1 1020254 - 16 FCAL aggregate aggr0 cl-01\r NET-1.2 1020MB - 17 FCAL aggregate aggr1 cl-01\r NET-1.3 1020MB - 18 FCAL aggregate aggr1 cl-01\r NET-1.4 1020MB - 19 FCAL aggregate aggr1 cl-01\r NET-1.5 1020MB - 20 FCAL aggregate aggr1 cl-01\r NET-1.6 1020MB - 21 FCAL aggregate aggr1 cl-01\r NET-1.7 1020MB - 22 FCAL aggregate aggr1 cl-01\r NET-1.8 1020MB - 24 FCAL aggregate aggr2 cl-01\r NET-1.9 1020MB - 16 FCAL aggregate aggr0 cl-01\r NET-1.10 1020MB - 17 FCAL aggregate aggr0 cl-01\r NET-1.11 1020MB - 18 FCAL aggregate aggr1 cl-01\r NET-1.12 1020MB - 19 FCAL aggregate aggr1 cl-01\r NET-1.13 1020MB - 20 FCAL aggregate aggr1 cl-01\r NET-1.14 1020MB - 25 FCAL aggregate aggr2 cl-01\r NET-1.15 1020MB - 26 FCAL aggregate aggr2 cl-01\r NET-1.16 1020MB - 27 FCAL aggregate aggr2 cl-01\r NET-1.17 1020MB - 28 FCAL aggregate aggr2 cl-01\r NET-1.18 1020MB - 21 FCAL aggregate aggr1 cl-01\r NET-1.19 1020MB - 22 FCAL aggregate aggr1 cl-01\r NET-1.20 1020MB - 24 FCAL aggregate aggr1 cl-01\r NET-1.21 1020MB - 25 FCAL aggregate aggr2 cl-01\r NET-1.22 1020MB - 26 FCAL aggregate aggr2 cl-01\r NET-1.23 1020MB - 27 FCAL aggregate aggr2 cl-01\r NET-1.24 1020MB - 28 FCAL aggregate aggr2 cl-01\r NET-1.25 1020MB - 29 FCAL aggregate aggr2 cl-01\r NET-1.26 1020MB - 32 FCAL aggregate aggr2 cl-01\r NET-1.27 1020MB - 29 FCAL aggregate aggr2 cl-01\r NET-1.28 1020MB - 32 FCAL spare Pool0 cl-01\r 28 entries were displayed.""" POOLS_INFO = """----cluster----\r last login time : 12 456 789\r \r Storage Pool Name: Pool1\r UUID of Storage Pool: 60f2f1b9-e60f-11e3\r Nodes Sharing the Storage Pool: node-a, node-b\r Number of Disks in Storage Pool: 2\r Allocation Unit Size: 372.5GB\r Storage Type: SSD\r Storage Pool Usable Size: 1.09TB\r Storage Pool Total Size: 1.45TB\r Is Pool Healthy?: true\r State of the Storage Pool: normal\r Reason for storage pool being unhealthy: -\r Job ID of the Currently Running Operation: - \r \r Storage Pool Name: Pool2\r UUID of Storage Pool: 60f2f1b9-e60f-11e3\r Nodes Sharing the Storage Pool: node-a, node-b\r Number of Disks in Storage Pool: 2\r Allocation Unit Size: 372.5GB\r Storage Type: SSD\r Storage Pool Usable Size: 1.09TB\r Storage Pool Total Size: 1.45TB\r Is Pool Healthy?: true\r State of the 
Storage Pool: normal\r Reason for storage pool being unhealthy: -\r Job ID of the Currently Running Operation: - \r""" AGGREGATE_DETAIL_INFO = """----cluster----\r last login time : 12 456 789\r \r Aggregate: aggr0\r Storage Type: hdd\r Checksum Style: block\r Number Of Disks: 3\r Mirror: false\r Disks for First Plex: NET-1.9, NET-1.1\r Disks for Mirrored Plex: -\r Partitions for First Plex: -\r Partitions for Mirrored Plex: -\r Node: cl-01\r Free Space Reallocation: off\r HA Policy: cfo\r Ignore Inconsistent: off\r Space Reserved for Snapshot Copies: 5%\r Aggregate Nearly Full Threshold Percent: 97%\r Aggregate Full Threshold Percent: 98%\r Checksum Verification: on\r RAID Lost Write: on\r Enable Thorough Scrub: off\r Hybrid Enabled: false\r Available Size: 0B\r Checksum Enabled: true\r Checksum Status: active\r Cluster: cl\r Home Cluster ID: 47096983-8018-11eb-bd5b\r DR Home ID: -\r DR Home Name: -\r Inofile Version: 4\r Has Mroot Volume: true\r Has Partner Node Mroot Volume: false\r Home ID: 4082368507\r Home Name: cl-01\r Total Hybrid Cache Size: 0B\r Hybrid: false\r Inconsistent: false\r Is Aggregate Home: true\r Max RAID Size: 16\r Flash Pool SSD Tier Maximum RAID Group Size: -\r Owner ID: 4082368507\r Owner Name: cl-01\r Used Percentage: 96%\r Plexes: /aggr0/plex0\r RAID Groups: /aggr0/plex0/rg0 (block)\r RAID Lost Write State: on\r RAID Status: raid_dp, normal\r RAID Type: raid_dp\r SyncMirror Resync Snapshot Frequency in Minutes: 5\r Is Root: true\r Space Used by Metadata for Volume Efficiency: 0B\r Size: 855MB\r State: online\r Maximum Write Alloc Blocks: 0\r Used Size: 0\r Uses Shared Disks: false\r UUID String: a71b1e4e-d151-abebf8\r Number Of Volumes: 1 Is Flash Pool Caching: -\r Is Eligible for Auto Balance Aggregate: false\r State of the aggregate being balanced: ineligible\r Total Physical Used Size: 712.3MB\r Physical Used Percentage: 79%\r State Change Counter for Auto Balancer: 0\r Is Encrypted: false\r SnapLock Type: non-snaplock\r Encryption Key ID: -\r Is in the precommit phase of Copy-Free Transition: false\r Is a 7-Mode transitioning aggregat: false\r Threshold When Aggregate Is Considered Unbalanced (%): 70\r Threshold When Aggregate Is Considered Balanced (%): 40\r Resynchronization Priority: -\r Space Saved by Data Compaction: 0B\r Percentage Saved by Data Compaction: 0%\r Amount of compacted data: 0B\r \r Aggregate: aggr1\r Storage Type: hdd\r Checksum Style: block\r Number Of Disks: 12\r Mirror: false\r Disks for First Plex: NET-1.2, NET-1.11,\r NET-1.12, NET-1.4,\r NET-1.13, NET-1.5,\r NET-1.18, NET-1.6,\r NET-1.19, NET-1.7\r Disks for Mirrored Plex: -\r Partitions for First Plex: -\r Partitions for Mirrored Plex: -\r Node: cl-01\r Free Space Reallocation: off\r HA Policy: sfo\r Ignore Inconsistent: off\r Space Reserved for Snapshot Copies: -\r Aggregate Nearly Full Threshold Percent: 95%\r Aggregate Full Threshold Percent: 98%\r Checksum Verification: on\r RAID Lost Write: on\r Enable Thorough Scrub: off\r Hybrid Enabled: false\r Available Size: 5.97GB\r Checksum Enabled: true\r Checksum Status: active\r Cluster: cl\r Home Cluster ID: 47096983-8018-bd\r DR Home ID: -\r DR Home Name: -\r Inofile Version: 4\r Has Mroot Volume: false\r Has Partner Node Mroot Volume: false\r Home ID: 4082368507\r Home Name: cl-01\r Total Hybrid Cache Size: 0B\r Hybrid: false\r Inconsistent: false\r Is Aggregate Home: true\r Max RAID Size: 16\r Flash Pool SSD Tier Maximum RAID Group Size: -\r Owner ID: 4082368507\r Owner Name: cl-01\r Used Percentage: 32%\r Plexes: /aggr1/plex0\r 
RAID Groups: /aggr1/plex0/rg0 (block)\r RAID Lost Write State: on\r RAID Status: raid_dp, normal\r RAID Type: raid_dp\r SyncMirror Resync Snapshot Frequency in Minutes: 5\r Is Root: false\r Space Used by Metadata for Volume Efficiency: 0B\r Size: 8.79GB\r State: online\r Maximum Write Alloc Blocks: 0\r Used Size: 2.82GB\r Uses Shared Disks: false\r UUID String: 68ffbbca-eb735\r Number Of Volumes: 3\r Is Flash Pool Caching: -\r Is Eligible for Auto Balance Aggregate: false\r State of the aggregate being balanced: ineligible\r Total Physical Used Size: 154.7MB\r Physical Used Percentage: 2%\r State Change Counter for Auto Balancer: 0\r Is Encrypted: false\r SnapLock Type: non-snaplock\r Encryption Key ID: -\r Is in the precommit phase of Copy-Free Transition: false\r Is a 7-Mode transitioning aggrega: false\r Threshold When Aggregate Is Considered Unbalanced (%): 70 Threshold When Aggregate Is Considered Balanced (%): 40\r Resynchronization Priority: -\r Space Saved by Data Compaction: 0B\r Percentage Saved by Data Compaction: 0%\r Amount of compacted data: 0B\r \r Aggregate: aggr2\r Storage Type: hdd\r Checksum Style: block\r Number Of Disks: 12\r Mirror: false\r Disks for First Plex: NET-1.8, NET-1.21,\r NET-1.14, NET-1.22,\r NET-1.15, NET-1.23,\r NET-1.16, NET-1.24,\r NET-1.17, NET-1.25,\r NET-1.27, NET-1.26\r Disks for Mirrored Plex: -\r Partitions for First Plex: -\r Partitions for Mirrored Plex: -\r Node: cl-01\r Free Space Reallocation: off\r HA Policy: sfo\r Ignore Inconsistent: off\r Space Reserved for Snapshot Copies: -\r Aggregate Nearly Full Threshold Percent: 95%\r Aggregate Full Threshold Percent: 98%\r Checksum Verification: on\r RAID Lost Write: on\r Enable Thorough Scrub: off\r Hybrid Enabled: false\r Available Size: 2.93GB\r Checksum Enabled: true\r Checksum Status: active\r Cluster: cl\r Home Cluster ID: 47096983-8018-\r DR Home ID: -\r DR Home Name: -\r Inofile Version: 4\r Has Mroot Volume: false\r Has Partner Node Mroot Volume: false\r Home ID: 4082368507\r Home Name: cl-01\r Total Hybrid Cache Size: 0B\r Hybrid: false\r Inconsistent: false\r Is Aggregate Home: true\r Max RAID Size: 16\r Flash Pool SSD Tier Maximum RAID Group Size: -\r Owner ID: 4082368507\r Owner Name: cl-01\r Used Percentage: 67%\r Plexes: /aggr2/plex0\r RAID Groups: /aggr2/plex0/rg0 (block)\r RAID Lost Write State: on\r RAID Status: raid_dp, normal\r RAID Type: raid_dp\r SyncMirror Resync Snapshot Frequency in Minutes: 5\r Is Root: false\r Space Used by Metadata for Volume Efficiency: 0B\r Size: 8.79GB\r State: online\r Maximum Write Alloc Blocks: 0\r Used Size: 5.85GB\r Uses Shared Disks: false\r UUID String: b5cfe36e-ea\r Number Of Volumes: 6 Is Flash Pool Caching: -\r Is Eligible for Auto Balance Aggregate: false\r State of the aggregate being balanced: ineligible\r Total Physical Used Size: 68.84MB\r Physical Used Percentage: 1%\r State Change Counter for Auto Balancer: 0\r Is Encrypted: false\r SnapLock Type: non-snaplock\r Encryption Key ID: -\r Is in the precommit phase of Copy-Free Transition: false\r Is a 7-Mode of space: false\r Threshold When Aggregate Is Considered Unbalanced (%): 70\r Threshold When Aggregate Is Considered Balanced (%): 40\r Resynchronization Priority: -\r Space Saved by Data Compaction: 0B\r Percentage Saved by Data Compaction: 0%\r Amount of compacted data: 0B\r 3 entries were displayed.\r """ LUN_INFO = """----cluster----\r last login time : 12 456 789\r \r Vserver Name: svm5\r LUN Path: /vol/lun_0_vol/lun_0\r Volume Name: lun_0_vol\r Qtree Name: ""\r LUN Name: 
lun_0\r LUN Size: 512MB\r OS Type: linux\r Space Reservation: enabled\r Serial Number: wpEzy]QpkWFm\r Serial Number (Hex): 7770457a795d51706b57466d\r Comment:\r Space Reservations Honored: true\r Space Allocation: disabled\r State: online\r LUN UUID: d4d1c11a-fa21-4ef8-9536-776017748474\r Mapped: unmapped Block Size: 512\r Device Legacy ID: -\r Device Binary ID: -\r Device Text ID: -\r Read Only: false\r Fenced Due to Restore: false\r Used Size: 0\r Maximum Resize Size: 64.00GB\r Creation Time: 5/7/2021 18:34:52\r Class: regular\r Node Hosting the LUN: cl-01\r QoS Policy Group: -\r Caching Policy Name: -\r Clone: false\r Clone Autodelete Enabled: false\r Inconsistent Import: false\r """ FS_INFO = """----cluster----\r last login time : 12 456 789\r \r Vserver Name: cl-01\r Volume Name: vol0\r Aggregate Name: aggr0\r List of Aggregates for FlexGroup Constituents: -\r Volume Size: 807.3MB\r Volume Data Set ID: -\r Volume Master Data Set ID: -\r Volume State: online\r Volume Style: flex\r Extended Volume Style: flexvol\r Is Cluster-Mode Volume: false\r Is Constituent Volume: false\r Export Policy: -\r User ID: -\r Group ID: -\r Security Style: -\r UNIX Permissions: ------------\r Junction Path: -\r Junction Path Source: -\r Junction Active: -\r Junction Parent Volume: -\r Comment: -\r Available Size: 135.4MB\r Filesystem Size: 807.3MB\r Total User-Visible Size: 766.9MB\r Used Size: -\r Used Percentage: 83%\r Volume Nearly Full Threshold Percent: 95%\r Volume Full Threshold Percent: 98%\r Maximum Autosize (for flexvols only): 968.7MB\r Minimum Autosize: 807.3MB\r Autosize Grow Threshold Percentage: 85%\r Autosize Shrink Threshold Percentage: 50%\r Autosize Mode: off\r Total Files (for user-visible data): 24539\r Files Used (for user-visible data): 16715\r Space Guarantee in Effect: true\r Space SLO in Effect: true\r Space SLO: none\r Space Guarantee Style: volume\r Fractional Reserve: 100%\r Volume Type: RW\r Snapshot Directory Access Enabled: true\r Space Reserved for Snapshot Copies: 5%\r Snapshot Reserve Used: 604%\r Snapshot Policy: -\r Creation Time: Mon Mar 08 14:09:37 2021\r Language: -\r Clone Volume: -\r Node name: cl-01\r Clone Parent Vserver Name: -\r FlexClone Parent Volume: -\r NVFAIL Option: on\r Volume's NVFAIL State: false\r Force NVFAIL on MetroCluster Switchover: off\r Is File System Size Fixed: false\r (DEPRECATED)-Extent Option: off\r Reserved Space for Overwrites: 0B\r Primary Space Management Strategy: volume_grow\r Read Reallocation Option: off\r Naming Scheme for Automatic Snapshot Copies: ordinal\r Inconsistency in the File System: false\r Is Volume Quiesced (On-Disk): false\r Is Volume Quiesced (In-Memory): false\r Volume Contains Shared or Compressed Data: false\r Space Saved by Storage Efficiency: 0B\r Percentage Saved by Storage Efficiency: 0%\r Space Saved by Deduplication: 0B\r Percentage Saved by Deduplication: 0%\r Space Shared by Deduplication: 0B\r Space Saved by Compression: 0B\r Percentage Space Saved by Compression: 0%\r Volume Size Used by Snapshot Copies: 243.7MB\r Block Type: 64-bit\r Is Volume Moving: -\r Flash Pool Caching Eligibility: read-write\r Flash Pool Write Caching Ineligibility Reason: -\r Managed By Storage Service: -\r Create Namespace Mirror Constituents For SnapDiff Use: -\r Constituent Volume Role: -\r QoS Policy Group Name: -\r Caching Policy Name: -\r Is Volume Move in Cutover Phase: -\r Number of Snapshot Copies in the Volume: 8\r VBN_BAD may be present in the active filesystem: false\r Is Volume on a hybrid aggregate: false\r Total 
Physical Used Size: 671.8MB\r Physical Used Percentage: 83%\r List of Nodes: -\r Is Volume a FlexGroup: false\r SnapLock Type: non-snaplock\r Vserver DR Protection: -\r UUID of the Efficiency Policy: b0f36cd7-e7bc-11e2-9994-123478563412\r \r Vserver Name: svm1\r Volume Name: svm1_root\r Aggregate Name: aggr1\r List of Aggregates for FlexGroup Constituents: -\r Volume Size: 800MB\r Volume Data Set ID: 1025\r Volume Master Data Set ID: 2155388521\r Volume State: online\r Volume Style: flex\r Extended Volume Style: flexvol\r Is Cluster-Mode Volume: true\r Is Constituent Volume: false\r Export Policy: default\r User ID: -\r Group ID: -\r Security Style: ntfs\r UNIX Permissions: ------------\r Junction Path: /\r Junction Path Source: -\r Junction Active: true\r Junction Parent Volume: -\r Comment:\r Available Size: 759.8MB\r Filesystem Size: 800MB\r Total User-Visible Size: 760MB\r Used Size: 244KB\r Used Percentage: 5%\r Volume Nearly Full Threshold Percent: 95%\r Volume Full Threshold Percent: 98%\r Maximum Autosize (for flexvols only): 960MB\r Minimum Autosize: 800MB\r Autosize Grow Threshold Percentage: 85%\r Autosize Shrink Threshold Percentage: 50%\r Autosize Mode: off\r Total Files (for user-visible data): 24313\r Files Used (for user-visible data): 103\r Space Guarantee in Effect: true\r Space SLO in Effect: true\r Space SLO: none\r Space Guarantee Style: volume\r Fractional Reserve: 100%\r Volume Type: RW\r Snapshot Directory Access Enabled: false\r Space Reserved for Snapshot Copies: 5%\r Snapshot Reserve Used: 0%\r Snapshot Policy: none\r Creation Time: Mon Mar 08 14:31:03 2021\r Language: C.UTF-8\r Clone Volume: false\r Node name: cl-01\r Clone Parent Vserver Name: -\r FlexClone Parent Volume: -\r NVFAIL Option: off\r Volume's NVFAIL State: false\r Force NVFAIL on MetroCluster Switchover: off\r Is File System Size Fixed: false\r (DEPRECATED)-Extent Option: off\r Reserved Space for Overwrites: 0B\r Primary Space Management Strategy: volume_grow\r Read Reallocation Option: off\r Naming Scheme for Automatic Snapshot Copies: create_time\r Inconsistency in the File System: false\r Is Volume Quiesced (On-Disk): false\r Is Volume Quiesced (In-Memory): false\r Volume Contains Shared or Compressed Data: false\r Space Saved by Storage Efficiency: 0B\r Percentage Saved by Storage Efficiency: 0%\r Space Saved by Deduplication: 0B\r Percentage Saved by Deduplication: 0%\r Space Shared by Deduplication: 0B\r Space Saved by Compression: 0B\r Percentage Space Saved by Compression: 0%\r Volume Size Used by Snapshot Copies: 0B\r Block Type: 64-bit\r Is Volume Moving: false\r Flash Pool Caching Eligibility: read-write\r Flash Pool Write Caching Ineligibility Reason: -\r Managed By Storage Service: -\r Create Namespace Mirror Constituents For SnapDiff Use: -\r Constituent Volume Role: -\r QoS Policy Group Name: -\r Caching Policy Name: -\r Is Volume Move in Cutover Phase: false\r Number of Snapshot Copies in the Volume: 0\r VBN_BAD may be present in the active filesystem: false\r Is Volume on a hybrid aggregate: false\r Total Physical Used Size: 244KB\r Physical Used Percentage: 0%\r List of Nodes: -\r Is Volume a FlexGroup: false\r SnapLock Type: non-snaplock\r Vserver DR Protection: -\r UUID of the Efficiency Policy: b0f36cd7-e7bc-11e2-9994-123478563412\r \r Vserver Name: svm1\r Volume Name: vol_svm1_1\r Aggregate Name: aggr1\r List of Aggregates for FlexGroup Constituents: -\r Volume Size: 2GB\r Volume Data Set ID: 1027\r Volume Master Data Set ID: 2155388523\r Volume State: online\r Volume Style: 
flex\r Extended Volume Style: flexvol\r Is Cluster-Mode Volume: true\r Is Constituent Volume: false\r Export Policy: default\r User ID: -\r Group ID: -\r Security Style: ntfs\r UNIX Permissions: ------------\r Junction Path: -\r Junction Path Source: -\r Junction Active: -\r Junction Parent Volume: -\r Comment:\r Available Size: 2.00GB\r Filesystem Size: 2GB\r Total User-Visible Size: 2GB\r Used Size: 3.84MB\r Used Percentage: 0%\r Volume Nearly Full Threshold Percent: 95%\r Volume Full Threshold Percent: 98%\r Maximum Autosize (for flexvols only): 2.40GB\r Minimum Autosize: 2GB\r Autosize Grow Threshold Percentage: 85%\r Autosize Shrink Threshold Percentage: 50%\r Autosize Mode: off\r Total Files (for user-visible data): 62258\r Files Used (for user-visible data): 97\r Space Guarantee in Effect: true\r Space SLO in Effect: true\r Space SLO: none\r Space Guarantee Style: volume\r Fractional Reserve: 100%\r Volume Type: RW\r Snapshot Directory Access Enabled: true\r Space Reserved for Snapshot Copies: 0%\r Snapshot Reserve Used: 0%\r Snapshot Policy: default\r Creation Time: Mon Mar 08 14:32:54 2021\r Language: C.UTF-8\r Clone Volume: false\r Node name: cl-01\r Clone Parent Vserver Name: -\r FlexClone Parent Volume: -\r NVFAIL Option: off\r Volume's NVFAIL State: false\r Force NVFAIL on MetroCluster Switchover: off\r Is File System Size Fixed: false\r (DEPRECATED)-Extent Option: off\r Reserved Space for Overwrites: 0B\r Primary Space Management Strategy: volume_grow\r Read Reallocation Option: off\r Naming Scheme for Automatic Snapshot Copies: create_time\r Inconsistency in the File System: false\r Is Volume Quiesced (On-Disk): false\r Is Volume Quiesced (In-Memory): false\r Volume Contains Shared or Compressed Data: false\r Space Saved by Storage Efficiency: 0B\r Percentage Saved by Storage Efficiency: 0%\r Space Saved by Deduplication: 0B\r Percentage Saved by Deduplication: 0%\r Space Shared by Deduplication: 0B\r Space Saved by Compression: 0B\r Percentage Space Saved by Compression: 0%\r Volume Size Used by Snapshot Copies: 2.98MB\r Block Type: 64-bit\r Is Volume Moving: false\r Flash Pool Caching Eligibility: read-write\r Flash Pool Write Caching Ineligibility Reason: -\r Managed By Storage Service: -\r Create Namespace Mirror Constituents For SnapDiff Use: -\r Constituent Volume Role: -\r QoS Policy Group Name: -\r Caching Policy Name: -\r Is Volume Move in Cutover Phase: false\r Number of Snapshot Copies in the Volume: 8\r VBN_BAD may be present in the active filesystem: false\r Is Volume on a hybrid aggregate: false\r Total Physical Used Size: 3.84MB\r Physical Used Percentage: 0%\r List of Nodes: -\r Is Volume a FlexGroup: false\r SnapLock Type: non-snaplock\r Vserver DR Protection: -\r UUID of the Efficiency Policy: b0f36cd7-e7bc-11e2-9994-123478563412\r 7 entries were displayed.""" ALERT_INFO = """----cluster----\r last login time : 12 456 789\r \r Node: node1\r Monitor: node-connect\r Alert ID: DualPathToDiskShelf_Alert\r Alerting Resource: 50:05:0c:c1:02:00:0f:02\r Subsystem: SAS-connect\r Indication Time: Mon Mar 10 10:26:38 2021\r Perceived Severity: Major\r Probable Cause: Connection_establishment_error\r Description: Disk shelf 2 does not disk\r disk 12312\r Corrective Actions: 1. Halt controller node1 and \r 2. Connect disk shelf 2 t\r 3. Reboot the halted controllers.\r 4. 
Contact support per.\r Possible Effect: Access to disk shelf\r Acknowledge: false\r Suppress: false\r Policy: DualPathToDiskShelf_Policy\r Acknowledger: -\r Suppressor: - \r Additional Information: Shelf uuid: 50:05:0c:c1:02:00:0f:02\r Shelf id: 2\r Shelf Name: 4d.shelf2\r Number of Paths: 1\r Number of Disks: 6\r Adapter connected to IOMA:\r Adapter connected to IOMB: 4d\r Alerting Resource Name: Shelf ID 2\r Additional Alert Tags: quality-of-service, nondisruptive-upgrade\r""" CONTROLLER_INFO = """----cluster----\r last login time : 12 456 789\r \r Node: cl-01\r Owner: \r Location: \r Model: SIMBOX\r Serial Number: 4082368-50-7\r Asset Tag: -\r Uptime: 1 days 06:17\r NVRAM System ID: 4082368507\r System ID: 4082368507\r Vendor: NetApp\r Health: true\r Eligibility: true\r Differentiated Services: false\r All-Flash Optimized: false\r """ PORTS_INFO = """----cluster----\r last login time : 12 456 789\r \r Node: cl-01\r Port: e0a\r Link: up\r MTU: 1500\r Auto-Negotiation Administrative: true\r Auto-Negotiation Operational: true\r Duplex Mode Administrative: auto\r Duplex Mode Operational: full\r Speed Administrative: auto\r Speed Operational: 1000\r Flow Control Administrative: full\r Flow Control Operational: none\r MAC Address: 00:0c:29:32:84:bd\r Port Type: physical\r Interface Group Parent Node: -\r Interface Group Parent Port: -\r Distribution Function: -\r Create Policy: -\r Parent VLAN Node: -\r Parent VLAN Port: -\r VLAN Tag: -\r Remote Device ID: -\r IPspace Name: Default\r Broadcast Domain: Default\r MTU Administrative: 1500\r Port Health Status: healthy\r Ignore Port Health Status: false\r Port Health Degraded Reasons: -\r \r Node: cl-01\r Port: e0b\r Link: up\r MTU: 1500\r Auto-Negotiation Administrative: true\r Auto-Negotiation Operational: true\r Duplex Mode Administrative: auto\r Duplex Mode Operational: full\r Speed Administrative: auto\r Speed Operational: 1000\r Flow Control Administrative: full\r Flow Control Operational: none\r MAC Address: 00:0c:29:32:84:c7\r Port Type: physical\r Interface Group Parent Node: -\r Interface Group Parent Port: -\r Distribution Function: -\r Create Policy: -\r Parent VLAN Node: -\r Parent VLAN Port: -\r VLAN Tag: -\r Remote Device ID: -\r IPspace Name: Default\r Broadcast Domain: Default\r MTU Administrative: 1500\r Port Health Status: healthy\r Ignore Port Health Status: false\r Port Health Degraded Reasons: -\r""" FC_PORT_INFO = """----cluster----\r last login time : 12 456 789\r \r Node: cl-01\r Adapter: 0a\r Description: Fibre Channel Target Adap\r Physical Protocol: fibre-channel\r Maximum Speed: 8\r Administrative Status: up\r Operational Status: online\r Extended Status: ADAPTER UP\r Host Port Address: 3e8\r Firmware Revision: 1.0.0\r Data Link Rate (Gbit): 8\r Fabric Established: true\r Fabric Name: -\r Connection Established: ptp\r Mediatype: ptp\r Configured Speed: auto\r Adapter WWNN: 50:0a:09:80:06:32:84:bd\r Adapter WWPN: 50:0a:09:81:06:32:84:bd\r Switch Port: ACME Switch:1\r Form Factor Of Transceiver: ACM\r Vendor Name Of Transceiver: SFP Vendor\r Part Number Of Transceiver: 0000\r Revision Of Transceiver: 1.0\r Serial Number Of Transceiver: 0000\r FC Capabilities Of Transceiver: 8 (Gbit/sec)\r Vendor OUI Of Transceiver: 0:5:2\r Wavelength In Nanometers: 0\r Date Code Of Transceiver: 11:04:02\r Validity Of Transceiver: true\r Connector Used: ACME Connector\r Encoding Used: 0\r Is Internally Calibrated: true\r Received Optical Power: 10.0 (uWatts)\r Is Received Power In Range: true\r SPF Transmitted Optical Power: 10.0 
(uWatts)\r Is Xmit Power In Range: true\r \r Node: cl-01\r Adapter: 0b\r Description: Fibre Channel Target \r Physical Protocol: fibre-channel\r Maximum Speed: 8\r Administrative Status: up\r Operational Status: online\r Extended Status: ADAPTER UP\r Host Port Address: 3e9\r Firmware Revision: 1.0.0\r Data Link Rate (Gbit): 8\r Fabric Established: true\r Fabric Name: -\r Connection Established: ptp\r Mediatype: ptp\r Configured Speed: auto\r Adapter WWNN: 50:0a:09:80:06:32:84:bd\r Adapter WWPN: 50:0a:09:82:06:32:84:bd\r Switch Port: ACME Switch:1\r Form Factor Of Transceiver: ACM\r Vendor Name Of Transceiver: SFP Vendor\r Part Number Of Transceiver: 0000\r Revision Of Transceiver: 1.0\r Serial Number Of Transceiver: 0000\r FC Capabilities Of Transceiver: 8 (Gbit/sec)\r Vendor OUI Of Transceiver: 0:5:2\r Wavelength In Nanometers: 0\r Date Code Of Transceiver: 11:04:02\r Validity Of Transceiver: true\r Connector Used: ACME Connector\r Encoding Used: 0\r Is Internally Calibrated: true\r Received Optical Power: 10.0 (uWatts)\r Is Received Power In Range: true\r SPF Transmitted Optical Power: 10.0 (uWatts)\r Is Xmit Power In Range: true\r \r Node: cl-01\r Adapter: 0c\r Description: Fibre Channel Target Adapter)\r Physical Protocol: ethernet\r Maximum Speed: 10\r Administrative Status: up\r Operational Status: online\r Extended Status: ADAPTER UP\r Host Port Address: 3ea\r Firmware Revision: 1.0.0\r Data Link Rate (Gbit): 10\r Fabric Established: true\r Fabric Name: -\r Connection Established: ptp\r Mediatype: ptp\r Configured Speed: auto\r Adapter WWNN: 50:0a:09:80:06:32:84:bd\r Adapter WWPN: 50:0a:09:83:06:32:84:bd\r Switch Port: ACME Switch:1\r Form Factor Of Transceiver: ACM\r Vendor Name Of Transceiver: SFP Vendor\r Part Number Of Transceiver: 0000\r Revision Of Transceiver: 1.0\r Serial Number Of Transceiver: 0000\r FC Capabilities Of Transceiver: 2,8 (Gbit/sec)\r Vendor OUI Of Transceiver: 0:5:2\r Wavelength In Nanometers: 0\r Date Code Of Transceiver: 11:04:02\r Validity Of Transceiver: true\r Connector Used: ACME Connector\r Encoding Used: 0\r Is Internally Calibrated: true\r Received Optical Power: 10.0 (uWatts)\r Is Received Power In Range: true\r SPF Transmitted Optical Power: 10.0 (uWatts)\r Is Xmit Power In Range: true\r \r Node: cl-01\r Adapter: 0d\r Description: Fibre Channel Target Adapt)\r Physical Protocol: ethernet\r Maximum Speed: 10\r Administrative Status: up\r Operational Status: online\r Extended Status: ADAPTER UP\r Host Port Address: 3eb\r Firmware Revision: 1.0.0\r Data Link Rate (Gbit): 10\r Fabric Established: true\r Fabric Name: -\r Connection Established: ptp\r Mediatype: ptp\r Configured Speed: auto\r Adapter WWNN: 50:0a:09:80:06:32:84:bd\r Adapter WWPN: 50:0a:09:84:06:32:84:bd\r Switch Port: ACME Switch:1\r Form Factor Of Transceiver: ACM\r Vendor Name Of Transceiver: SFP Vendor\r Part Number Of Transceiver: 0000\r Revision Of Transceiver: 1.0\r Serial Number Of Transceiver: 0000\r FC Capabilities Of Transceiver: 2,8 (Gbit/sec)\r Vendor OUI Of Transceiver: 0:5:2\r Wavelength In Nanometers: 0\r Date Code Of Transceiver: 11:04:02\r Validity Of Transceiver: true\r Connector Used: ACME Connector\r Encoding Used: 0\r Is Internally Calibrated: true\r Received Optical Power: 10.0 (uWatts)\r Is Received Power In Range: true\r SPF Transmitted Optical Power: 10.0 (uWatts)\r Is Xmit Power In Range: true\r \r Node: cl-01\r Adapter: 0e\r Description: Fibre Channel Target Adap)\r Physical Protocol: fibre-channel\r Maximum Speed: 16\r Administrative Status: up\r Operational 
Status: online\r Extended Status: ADAPTER UP\r Host Port Address: 3ec\r Firmware Revision: 1.0.0\r Data Link Rate (Gbit): 16\r Fabric Established: true\r Fabric Name: -\r Connection Established: ptp\r Mediatype: ptp\r Configured Speed: auto\r Adapter WWNN: 50:0a:09:80:06:32:84:bd\r Adapter WWPN: 50:0a:09:85:06:32:84:bd\r Switch Port: ACME Switch:1\r Form Factor Of Transceiver: ACM\r Vendor Name Of Transceiver: SFP Vendor\r Part Number Of Transceiver: 0000\r Revision Of Transceiver: 1.0\r Serial Number Of Transceiver: 0000\r FC Capabilities Of Transceiver: 10 (Gbit/sec)\r Vendor OUI Of Transceiver: 0:5:2\r Wavelength In Nanometers: 0\r Date Code Of Transceiver: 11:04:02\r Validity Of Transceiver: true\r Connector Used: ACME Connector\r Encoding Used: 0\r Is Internally Calibrated: true\r Received Optical Power: 10.0 (uWatts)\r Is Received Power In Range: true\r SPF Transmitted Optical Power: 10.0 (uWatts)\r Is Xmit Power In Range: true\r 5 entries were displayed.\r""" DISKS_INFO = """----cluster----\r last login time : 12 456 789\r \r Disk: NET-1.1\r Container Type: aggregate\r Owner/Home: cl-01 / cl-01\r DR Home: -\r Stack ID/Shelf/Bay: - / - / 16\r LUN: 0\r Array: NETAPP_VD_1\r Vendor: NETAPP\r Model: VD-1000MB-FZ-520\r Serial Number: 07294300 UID: 4E455441:50502020:56442D31:\r BPS: 520\r Physical Size: 1.00GB\r Position: parity\r Checksum Compatibility: block\r Aggregate: aggr0\r Plex: plex0\r Paths:\r LUN Initiatr Side Target Side Link\r Controller IniD SwitcSwitch Port Acc Use Target Port TPGN Speed/s IOPS\r --------------- -------------------- --- -------------------- -------- ----\r cl-01 v1 0 N/A N/A AO INU 0000000000000000 0 0 Gb/S 0 0\r cl-01 v5 0 N/A N/A AO RDY 0000000000000000 0 0 Gb/S 0 0\r \r Errors:\r - Disk: NET-1.2\r Container Type: aggregate\r Owner/Home: cl-01 / cl-01\r DR Home: -\r Stack ID/Shelf/Bay: - / - / 17\r LUN: 0\r Array: NETAPP_VD_1\r Vendor: NETAPP\r Model: VD-1000MB-FZ-520\r Serial Number: 07294301\r UID: 4E455441:50502\r BPS: 520\r Physical Size: 1.00GB\r Position: dparity\r Checksum Compatibility: block\r Aggregate: aggr1\r Plex: plex0\r Paths:\r LUN Initiatr Side Target Side Link\r Controller IniD SwitcSwitch Port Acc Use Target Port TPGN Speed/s IOPS\r --------------- -------------------- --- -------------------- -------- ----\r cl-01 v1 0 N/A N/A AO INU 0000000000000000 0 0 Gb/S 0 0\r cl-01 v5 0 N/A N/A AO RDY 0000000000000000 0 0 Gb/S 0 0\r \r Errors:\r -\r """ PHYSICAL_INFO = """----cluster----\r last login time : 12 456 789\r \r Disk Type Vendor Model Revision RPM BPS\r ---------------- ------- -------- -------------------- -------- ------- ----\r NET-1.1 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294300\r NET-1.2 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294301\r NET-1.3 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294302\r NET-1.4 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294303\r NET-1.5 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294304\r NET-1.6 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294305\r NET-1.7 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294306\r NET-1.8 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294307\r NET-1.9 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904200\r NET-1.10 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904201\r NET-1.11 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904202\r NET-1.12 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 
520\r SerialNumber: 07904203\r NET-1.13 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904204\r NET-1.14 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294308\r NET-1.15 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294309\r NET-1.16 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294310\r NET-1.17 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294311\r NET-1.18 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904205\r NET-1.19 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904206\r NET-1.20 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904207\r NET-1.21 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904208\r NET-1.22 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904209\r NET-1.23 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904210\r NET-1.24 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904311\r NET-1.25 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904312\r NET-1.26 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07904313\r NET-1.27 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294312\r NET-1.28 FCAL NETAPP VD-1000MB-FZ-520 0042 15000 520\r SerialNumber: 07294313\r 28 entries were displayed.\r""" ERROR_DISK_INFO = """----cluster----\r last login time : 12 456 789\r \r Disk Error Type Error Text\r ---------------- ----------------- ----------------------------------\r NET-1.25 diskfail .""" QTREES_INFO = """----cluster----\r last login time : 12 456 789\r \r Vserver Name: svm1\r Volume Name: svm1_root\r Qtree Name: ""\r Actual (Non-Junction) Qtree Path: /vol/svm1_root\r Security Style: ntfs\r Oplock Mode: enable\r Unix Permissions: -\r Qtree Id: 0\r Qtree Status: normal\r Export Policy: default\r Is Export Policy Inherited: true\r \r Vserver Name: svm1\r Volume Name: vol_svm1_1\r Qtree Name: ""\r Actual (Non-Junction) Qtree Path: /vol/vol_svm1_1\r Security Style: ntfs\r Oplock Mode: enable\r Unix Permissions: -\r Qtree Id: 0\r Qtree Status: normal\r Export Policy: default\r Is Export Policy Inherited: true\r \r Vserver Name: svm1\r Volume Name: vol_svm1_1\r Qtree Name: qtree_svm1_1\r Actual (Non-Junction) Qtree Path: /vol/vol_svm1_1/qtree_svm1_1\r Security Style: unix\r Oplock Mode: enable\r Unix Permissions: ---rwxrwxrwx\r Qtree Id: 1\r Qtree Status: normal\r Export Policy: default\r Is Export Policy Inherited: true\r \r Vserver Name: svm1\r Volume Name: vol_svm1_2\r Qtree Name: ""\r Actual (Non-Junction) Qtree Path: /vol/vol_svm1_2\r Security Style: ntfs\r Oplock Mode: enable\r Unix Permissions: -\r Qtree Id: 0\r Qtree Status: normal\r Export Policy: default\r Is Export Policy Inherited: true""" SHARE_VSERVER_INFO = """----cluster----\r last login time : 12 456 789\r \r Admin Operational Root\r Vserver Type Subtype State State Volume Aggregate\r ----------- ------- ---------- ---------- ----------- ---------- ----------\r svm4.example.com data default running running SVC_FC_ NETAPP""" SHARES_INFO = """----cluster----\r last login time : 12 456 789\r \r Vserver: svm4.example.com\r Share: admin$\r CIFS Server NetBIOS Name: NETAPP-NODE01\r Path: /\r Share Properties: browsable\r Symlink Properties: -\r File Mode Creation Mask: -\r Directory Mode Creation Mask: -\r Share Comment: -\r Share ACL: -\r File Attribute Cache Lifetime: -\r Volume Name: svm4examplecom_root\r Offline Files: -\r Vscan File-Operations Profile: standard\r Maximum Tree Connections on Share: 
4294967295\r UNIX Group for File Create: -\r \r Vserver: svm4.example.com\r Share: c$\r CIFS Server NetBIOS Name: NETAPP-NODE01\r Path: /\r Share Properties: oplocks\r browsable\r changenotify\r show-previous-versions\r Symlink Properties: symlinks\r File Mode Creation Mask: -\r Directory Mode Creation Mask: -\r Share Comment: -\r Share ACL: BUILTIN\r File Attribute Cache Lifetime: -\r Volume Name: svm4examplecom_root\r Offline Files: -\r Vscan File-Operations Profile: standard\r Maximum Tree Connections on Share: 4294967295\r UNIX Group for File Create: -\r \r Vserver: svm4.example.com\r Share: etc\r CIFS Server NetBIOS Name: NETAPP-NODE01\r Path: /.vsadmin/config/etc\r Share Properties: browsable\r changenotify\r oplocks\r show-previous-versions\r Symlink Properties: enable\r File Mode Creation Mask: -\r Directory Mode Creation Mask: -\r Share Comment: -\r Share ACL: Everyone / Full Control\r File Attribute Cache Lifetime: -\r Volume Name: svm4examplecom_root\r Offline Files: manual\r Vscan File-Operations Profile: standard\r Maximum Tree Connections on Share: 4294967295\r UNIX Group for File Create: -\r \r Vserver: svm4.example.com\r Share: ipc$\r CIFS Server NetBIOS Name: NETAPP-NODE01\r Path: /\r Share Properties: browsable\r Symlink Properties: -\r File Mode Creation Mask: -\r Directory Mode Creation Mask: -\r Share Comment: -\r Share ACL: -\r File Attribute Cache Lifetime: -\r Volume Name: svm4examplecom_root\r Offline Files: -\r Vscan File-Operations Profile: standard\r Maximum Tree Connections on Share: 4294967295\r UNIX Group for File Create: -\r \r Vserver: svm4.example.com\r Share: vol_svm4_1\r CIFS Server NetBIOS Name: NETAPP-NODE01\r Path: /vol_svm4_1\r Share Properties: oplocks\r browsable\r changenotify\r show-previous-versions\r Symlink Properties: symlinks\r File Mode Creation Mask: -\r Directory Mode Creation Mask: -\r Share Comment: -\r Share ACL: Everyone / Full Control\r File Attribute Cache Lifetime: -\r Volume Name: vol_svm4_1\r Offline Files: manual\r Vscan File-Operations Profile: standard\r Maximum Tree Connections on Share: 4294967295\r UNIX Group for File Create: -""" SHARES_AGREEMENT_INFO = """----cluster----\r last login time : 12 456 789\r \r vserver allowed-protocols\r ------- -----------------\r svm4.example.com nfs,cifs,fcp,iscsi\r 7 entries were displayed.\r """ THIN_FS_INFO = """----cluster----\r last login time : 12 456 789\r \r Vserver Volume Aggregate State Type Size Available Used%\r --------- ------------ ------------ ---------- ---- -\r svm1 vol_svm1_2 aggr1 online RW 2GB 2.00GB 0%\r""" TRAP_MAP = { '1.3.6.1.4.1.789.1.1.12.0': 'A Health Monitor has clear an alert. 
' '[Alert Id = DisabledInuseSASPort_Alert , Alerting Resource = 0a].', 'controller_name': 'cl-01', '1.3.6.1.4.1.789.1.1.9.0': '1-80-000008' } QUOTAS_INFO = """----cluster----\r last login time : 12 456 789\r \r Vserver: svm5\r Policy Name: default\r Volume Name: svm5_vol1\r Type: tree\r Target: qtree_21052021_110317_94\r Qtree Name: ""\r User Mapping: -\r Disk Limit: 4.88MB\r Files Limit: 1000\r Threshold for Disk Limit: 4.88MB\r Soft Disk Limit: 4.88MB\r Soft Files Limit: 1000\r \r Vserver: svm5\r Policy Name: default\r Volume Name: svm5_vol1\r Type: user\r Target: ""\r Qtree Name: ""\r User Mapping: off\r Disk Limit: 4.88MB\r Files Limit: 1000\r Threshold for Disk Limit: 4.88MB\r Soft Disk Limit: 4.88MB\r Soft Files Limit: 1000\r \r Vserver: svm5\r Policy Name: default\r Volume Name: svm5_vol1\r Type: group\r Target: ""\r Qtree Name: ""\r User Mapping: -\r Disk Limit: 4.88MB\r Files Limit: 1000\r Threshold for Disk Limit: 4.88MB\r Soft Disk Limit: 4.88MB\r Soft Files Limit: 1000\r \r Vserver: svm5\r Policy Name: default\r Volume Name: svm5_vol1\r Type: group\r Target: ""\r Qtree Name: qtree_08052021_152034_44\r User Mapping: -\r Disk Limit: 4.88MB\r Files Limit: 100\r Threshold for Disk Limit: 4.88MB\r Soft Disk Limit: 4.88MB\r Soft Files Limit: 100\r \r Vserver: svm5\r Policy Name: default\r Volume Name: svm5_vol1\r Type: group\r Target: pcuser\r Qtree Name: ""\r User Mapping: -\r Disk Limit: 4.88MB\r Files Limit: 1000\r Threshold for Disk Limit: 4.88MB\r Soft Disk Limit: 4.88MB\r Soft Files Limit: 1000\r 5 entries were displayed.""" NFS_SHARE_INFO = """----cluster----\r last login time : 12 456 789\r \r Vserver Name: svm4.example.com\r Volume Name: svm4examplecom_root\r Aggregate Name: aggr1\r List of Aggregates for FlexGroup Constituents: -\r Volume Size: 20MB\r Volume Data Set ID: 1036\r Volume Master Data Set ID: 2155388532\r Volume State: online\r Volume Style: flex\r Extended Volume Style: flexvol\r Is Cluster-Mode Volume: true\r Is Constituent Volume: false\r Export Policy: default\r User ID: -\r Group ID: -\r Security Style: ntfs\r UNIX Permissions: ------------\r Junction Path: /\r Junction Path Source: -\r Junction Active: true\r Junction Parent Volume: -\r Comment:\r Available Size: 18.79MB\r Filesystem Size: 20MB\r Total User-Visible Size: 19MB\r Used Size: 220KB\r Used Percentage: 6%\r Volume Nearly Full Threshold Percent: 95%\r Volume Full Threshold Percent: 98%\r Maximum Autosize (for flexvols only): 24MB\r Minimum Autosize: 20MB\r Autosize Grow Threshold Percentage: 85%\r Autosize Shrink Threshold Percentage: 50%\r Autosize Mode: off\r Total Files (for user-visible data): 566\r Files Used (for user-visible data): 104\r Space Guarantee in Effect: true\r Space SLO in Effect: true\r Space SLO: none\r Space Guarantee Style: volume\r Fractional Reserve: 100%\r Volume Type: RW\r Snapshot Directory Access Enabled: true\r Space Reserved for Snapshot Copies: 5%\r Snapshot Reserve Used: 100%\r Snapshot Policy: default\r Creation Time: Sat Mar 13 20:15:43 20\r Language: C.UTF-8\r Clone Volume: false\r Node name: cl-01\r Clone Parent Vserver Name: -\r FlexClone Parent Volume: -\r NVFAIL Option: off\r Volume's NVFAIL State: false\r Force NVFAIL on MetroCluster Switchover: off\r Is File System Size Fixed: false\r (DEPRECATED)-Extent Option: off\r Reserved Space for Overwrites: 0B\r Primary Space Management Strategy: volume_grow\r Read Reallocation Option: off\r Naming Scheme for Automatic Snapshot Copies: create_time\r Inconsistency in the File System: false\r Is Volume Quiesced 
(On-Disk): false\r Is Volume Quiesced (In-Memory): false\r Volume Contains Shared or Compressed Data: false\r Space Saved by Storage Efficiency: 0B\r Percentage Saved by Storage Efficiency: 0%\r Space Saved by Deduplication: 0B\r Percentage Saved by Deduplication: 0%\r Space Shared by Deduplication: 0B\r Space Saved by Compression: 0B\r Percentage Space Saved by Compression: 0%\r Volume Size Used by Snapshot Copies: 1.00MB\r Block Type: 64-bit\r Is Volume Moving: false\r Flash Pool Caching Eligibility: read-write\r Flash Pool Write Caching Ineligibility Reason: -\r Managed By Storage Service: -\r Create Namespace Mirror Constituents For SnapDiff Use: -\r Constituent Volume Role: -\r QoS Policy Group Name: -\r Caching Policy Name: -\r Is Volume Move in Cutover Phase: false\r Number of Snapshot Copies in the Volume: 8\r VBN_BAD may be present in the active filesystem: false\r Is Volume on a hybrid aggregate: false\r Total Physical Used Size: 1.21MB\r Physical Used Percentage: 6%\r List of Nodes: -\r Is Volume a FlexGroup: false\r SnapLock Type: non-snaplock\r Vserver DR Protection: -\r \r Vserver Name: svm4.example.com\r Volume Name: vol_svm4_1\r Aggregate Name: aggr1\r List of Aggregates for FlexGroup Constituents: -\r Volume Size: 1GB\r Volume Data Set ID: 1037\r Volume Master Data Set ID: 2155388533\r Volume State: online\r Volume Style: flex\r Extended Volume Style: flexvol\r Is Cluster-Mode Volume: true\r Is Constituent Volume: false\r Export Policy: default\r User ID: 0\r Group ID: 0\r Security Style: mixed\r UNIX Permissions: ---rwxrwxrwx\r Junction Path: /vol_svm4_1\r Junction Path Source: RW_volume\r Junction Active: true\r Junction Parent Volume: svm4examplecom_root\r Comment:\r Available Size: 972.5MB\r Filesystem Size: 1GB\r Total User-Visible Size: 972.8MB\r Used Size: 340KB\r Used Percentage: 5%\r Volume Nearly Full Threshold Percent: 95%\r Volume Full Threshold Percent: 98%\r Maximum Autosize (for flexvols only): 1.20GB\r Minimum Autosize: 1GB\r Autosize Grow Threshold Percentage: 85%\r Autosize Shrink Threshold Percentage: 50%\r Autosize Mode: off\r Total Files (for user-visible data): 31122\r Files Used (for user-visible data): 97\r Space Guarantee in Effect: true\r Space SLO in Effect: true\r Space SLO: none\r Space Guarantee Style: volume\r Fractional Reserve: 100%\r Volume Type: RW\r Snapshot Directory Access Enabled: true\r Space Reserved for Snapshot Copies: 5%\r Snapshot Reserve Used: 3%\r Snapshot Policy: default\r Creation Time: Sat Mar 13 20:35:56 20\r Language: C.UTF-8\r Clone Volume: false\r Node name: cl-01\r Clone Parent Vserver Name: -\r FlexClone Parent Volume: -\r NVFAIL Option: off\r Volume's NVFAIL State: false\r Force NVFAIL on MetroCluster Switchover: off\r Is File System Size Fixed: false\r (DEPRECATED)-Extent Option: off\r Reserved Space for Overwrites: 0B\r Primary Space Management Strategy: volume_grow\r Read Reallocation Option: off\r Naming Scheme for Automatic Snapshot Copies: create_time\r Inconsistency in the File System: false\r Is Volume Quiesced (On-Disk): false\r Is Volume Quiesced (In-Memory): false\r Volume Contains Shared or Compressed Data: false\r Space Saved by Storage Efficiency: 0B\r Percentage Saved by Storage Efficiency: 0%\r Space Saved by Deduplication: 0B\r Percentage Saved by Deduplication: 0%\r Space Shared by Deduplication: 0B\r Space Saved by Compression: 0B\r Percentage Space Saved by Compression: 0%\r Volume Size Used by Snapshot Copies: 1.45MB\r Block Type: 64-bit\r Is Volume Moving: false\r Flash Pool Caching Eligibility: 
read-write\r Flash Pool Write Caching Ineligibility Reason: -\r Managed By Storage Service: -\r Create Namespace Mirror Constituents For SnapDiff Use: -\r Constituent Volume Role: -\r QoS Policy Group Name: -\r Caching Policy Name: -\r Is Volume Move in Cutover Phase: false\r Number of Snapshot Copies in the Volume: 8\r VBN_BAD may be present in the active filesystem: false\r Is Volume on a hybrid aggregate: false\r Total Physical Used Size: 1.78MB\r Physical Used Percentage: 0%\r List of Nodes: -\r Is Volume a FlexGroup: false\r SnapLock Type: non-snaplock\r Vserver DR Protection: -\r 8 entries were displayed.\r""" NODE_IPS_INFO = """----cluster----\r last login time : 12 456 789\r \r vserver lif address\r ------- ----------- ---------------\r cl cl-01_mgmt1 192.168.159.130""" CLUSTER_IPS_INFO = """----cluster----\r last login time : 12 456 789\r \r vserver lif address\r ------- ------------ ---------------\r cl cluster_mgmt 192.168.159.131""" CONTROLLER_IP_INFO = """vserver lif curr-node address\r --------- --------------- --------- ------------\r NetappFSA Netapp-01_mgmt1 cl-01 8.44.162.245""" RESOURCE_METRICS = { 'storage': ['iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime'], 'storagePool': ['iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime'], 'volume': ['iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime', 'cacheHitRatio', 'readCacheHitRatio', 'writeCacheHitRatio', 'ioSize', 'readIoSize', 'writeIoSize'], 'controller': ['iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime'], 'port': ['iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime'], 'disk': ['iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'responseTime'], 'filesystem': ['iops', 'readIops', 'writeIops', 'throughput', 'readThroughput', 'writeThroughput', 'ioSize', 'readIoSize', 'writeIoSize'], } CLUSTER_PER_INFO = [ { "timestamp": "2017-01-25T11:20:00Z", "status": "ok", "_links": { "self": { "href": "/api/resourcelink" } }, "throughput": { "read": "200", "total": "1000", "write": "100" }, "latency": { "other": 0, "read": "200", "total": "1000", "write": "100" }, "iops": { "other": 0, "read": "200", "total": "1000", "write": "100" }, "duration": "PT15S" } ] POOL_PER_INFO = [ { "timestamp": "2017-01-25T11:20:00Z", "status": "ok", "_links": { "self": { "href": "/api/resourcelink" } }, "throughput": { "read": "200", "total": "1000", "write": "100" }, "latency": { "other": 0, "read": "200", "total": "1000", "write": "100" }, "iops": { "other": 0, "read": "200", "total": "1000", "write": "100" }, "duration": "PT15S" } ] LUN_PER_INFO = [ { "timestamp": "2017-01-25T11:20:00Z", "status": "ok", "_links": { "self": { "href": "/api/resourcelink" } }, "throughput": { "read": "200", "total": "1000", "write": "100" }, "latency": { "other": 0, "read": "200", "total": "1000", "write": "100" }, "iops": { "other": 0, "read": "200", "total": "1000", "write": "100" }, "duration": "PT15S" } ] FS_PER_INFO = [ { "timestamp": "2017-01-25T11:20:00Z", "status": "ok", "_links": { "self": { "href": "/api/resourcelink" } }, "throughput": { "read": "200", "total": "1000", "write": "100" }, "latency": { "other": 0, "read": "200", "total": "1000", "write": "100" }, "iops": { "other": 0, "read": "200", "total": "1000", "write": "100" }, "duration": "PT15S" } ] FS_REST_INFO = [ { "name": "vol1", 
"uuid": "02c9e252-41be-11e9-81d5-00a0986138f7", "svm": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" }, } ] PORT_REST_INFO = [ { "name": "e0a", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7", "node": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "node1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" }, } ] FC_PER_INFO = [ { "timestamp": "2017-01-25T11:20:00Z", "status": "ok", "_links": { "self": { "href": "/api/resourcelink" } }, "throughput": { "read": "200", "total": "1000", "write": "100" }, "latency": { "other": 0, "read": "200", "total": "1000", "write": "100" }, "iops": { "other": 0, "read": "200", "total": "1000", "write": "100" }, "duration": "PT15S" } ] ETH_PER_INFO = [ { "timestamp": "2017-01-25T11:20:00Z", "status": "ok", "_links": { "self": { "href": "/api/resourcelink" } }, "throughput": { "read": "200", "total": "1000", "write": "100" }, "duration": "PT15S" } ] FC_INITIATOR_INFO = """ Vserver: PSA-xiejun00295347\r Logical Interface: PSA02-xiejun\r Initiator WWPN: 10:00:00:00:c9:d5:b9:6e\r Port Address: 50400\r Initiator WWNN: 20:00:00:00:c9:d5:b9:6e\r Initiator WWPN Alias: -\r Igroup Name: test00101, Test_MKL_Suse_8.44.133.65\r \r Vserver: SVC_FC\r Logical Interface: Migration_NetApp02_0c_02\r Initiator WWPN: 10:00:00:00:c9:d5:b9:6e\r Port Address: 50400\r Initiator WWNN: 20:00:00:00:c9:d5:b9:6e\r Initiator WWPN Alias: -\r Igroup Name: -\r \r Vserver: SVM_VDF\r Logical Interface: VDF_test02\r Initiator WWPN: 10:00:00:00:c9:d5:b9:6e\r Port Address: 50400\r Initiator WWNN: 20:00:00:00:c9:d5:b9:6e\r Initiator WWPN Alias: -\r Igroup Name: -\r \r Vserver: xiejun_00295347\r Logical Interface: xiejun_port1\r Initiator WWPN: 10:00:00:00:c9:d5:b9:6e\r Port Address: 50400\r Initiator WWNN: 20:00:00:00:c9:d5:b9:6e\r Initiator WWPN Alias: -\r Igroup Name: -\r 4 entries were displayed. 
""" ISCSI_INITIATOR_INFO = """ Vserver: svm3\r Target Portal Group: zb_IPV602\r Target Session ID: 357\r Initiator Name: iqn.2006-08.com.huawei:21004447dcca426::01\r Initiator Alias : -\r TPGroup Tag: 1062 \r Initiator Session ID: 80:12:34:58:78:9a\r Igroup Name: -""" HOSTS_INFO = """ Vserver Name: svm1\r Igroup Name: fcstart1\r Protocol: mixed\r OS Type: linux\r Portset Binding Igroup: portgroup\r Igroup UUID: c5ca5750-121f-11ec-b66c-000c29bfc4d7\r ALUA: true\r Initiators: 20:01:00:0c:29:bf:c4:d7 (not logged in)\r 10:00:00:00:c9:d5:b9:6e (not logged in)\r iqn.2006-08.com.huawei:21004447dcca426::0 (not logged in)\r \r Vserver Name: svm3\r Igroup Name: svm3\r Protocol: mixed\r OS Type: windows\r Portset Binding Igroup: portgroup2\r Igroup UUID: 9a6c2496-174b-11ec-b66c-000c29bfc4d7\r ALUA: true\r Initiators: iqn.2006-08.com.huawei:21004447dcca426::0 (not logged in)\r 10:00:00:00:c9:d5:b9:6e (not logged in)""" PORT_SET_INFO = """ Vserver Name: svm1\r Portset Name: portgroup\r LIF Or TPG Name: ontap-01_fc_lif_1, ontap-01_fcoe_lif_1, fc1\r Protocol: fcp\r Number Of Ports: 3\r Bound To Igroups: fcstart1\r \r Vserver Name: svm3\r Portset Name: portgroup2\r LIF Or TPG Name: ontap-01_iscsi_lif_1\r Protocol: iscsi\r Number Of Ports: 1\r Bound To Igroups: svm3\r 2 entries were displayed.""" LIF_INFO = """ Vserver Name: svm1\r Logical Interface Name: ontap-01_fc_lif_1\r Role: data\r Data Protocol: fcp\r Home Node: ontap-01\r Home Port: 0a\r Current Node: ontap-01\r Current Port: 0a\r Operational Status: down\r Extended Status: Groovy, man!\r Is Home: true\r Network Address: -\r Netmask: -\r Bits in the Netmask: -\r Subnet Name: -\r Administrative Status: up\r Failover Policy: disabled\r Firewall Policy: -\r Auto Revert: false\r Fully Qualified DNS Zone Name: none\r DNS Query Listen Enable: -\r Failover Group Name: -\r FCP WWPN: 20:00:00:0c:29:bf:c4:d7\r Address family: -\r Comment: -\r IPspace of LIF: -\r Is Dynamic DNS Update Enabled?: -\r \r Vserver Name: svm1\r Logical Interface Name: ontap-01_fcoe_lif_1\r Role: data\r Data Protocol: fcp\r Home Node: ontap-01\r Home Port: 0c\r Current Node: ontap-01\r Current Port: 0c\r Operational Status: down\r Extended Status: Groovy, man!\r Is Home: true\r Network Address: -\r Netmask: -\r Bits in the Netmask: -\r Subnet Name: -\r Administrative Status: up\r Failover Policy: disabled\r Firewall Policy: -\r Auto Revert: false\r Fully Qualified DNS Zone Name: none\r DNS Query Listen Enable: -\r Failover Group Name: -\r FCP WWPN: 20:01:00:0c:29:bf:c4:d7\r Address family: -\r Comment: -\r IPspace of LIF: -\r Is Dynamic DNS Update Enabled?: -\r \r Vserver Name: svm3\r Logical Interface Name: ontap-01_iscsi_lif_1\r Role: data\r Data Protocol: iscsi\r Home Node: ontap-01\r Home Port: e0a\r Current Node: ontap-01\r Current Port: e0a\r Operational Status: up\r Extended Status: -\r Is Home: true\r Network Address: 192.168.159.140\r Netmask: 255.255.255.0\r Bits in the Netmask: 24\r Subnet Name: -\r Administrative Status: up\r Failover Policy: disabled\r Firewall Policy: data\r Auto Revert: false\r Fully Qualified DNS Zone Name: none\r DNS Query Listen Enable: false\r Failover Group Name: -\r FCP WWPN: -\r Address family: ipv4\r Comment: -\r IPspace of LIF: Default\r Is Dynamic DNS Update Enabled?: false""" LUN_MAPPING_INFO = """ Vserver Name: svm1\r LUN Path: /vol/lun_1_vol/lun_1\r Volume Name: lun_1_vol\r Qtree Name: ""\r LUN Name: lun_1\r Igroup Name: fcstart1\r Igroup OS Type: windows\r Igroup Protocol Type: fcp\r LUN ID: 123\r Portset Binding Igroup: portgroup\r 
ALUA: true\r Initiators: 20:00:00:0c:29:bf:c4:d7, 10:00:00:00:c9:d5:b9:6e\r LUN Node: ontap-01\r Reporting Nodes: ontap-01\r \r Vserver Name: svm3\r LUN Path: /vol/svm3_lun/svm3_lun\r Volume Name: svm3_lun\r Qtree Name: ""\r LUN Name: svm3_lun\r Igroup Name: svm3\r Igroup OS Type: windows\r Igroup Protocol Type: iscsi\r LUN ID: 0\r Portset Binding Igroup: portgroup2\r ALUA: true\r Initiators: iqn.2006-08.com.huawei:21004447dcca426::0\r LUN Node: ontap-01\r Reporting Nodes: ontap-01\r 2 entries were displayed.""" MAPPING_LUN_INFO = """ Vserver Name: svm1\r LUN Path: /vol/lun_1_vol/lun_1\r Volume Name: lun_1_vol\r Qtree Name: ""\r LUN Name: lun_1\r LUN Size: 1.00GB\r OS Type: windows_2008\r Space Reservation: enabled\r Serial Number: wpEzy]RQjLqN\r Serial Number (Hex): 7770457a795d52516a4c714e\r Comment:\r Space Reservations Honored: true\r Space Allocation: disabled\r State: online\r LUN UUID: 2aa5a7ab-efbe-41f3-a4bf-dcd741e641a1\r Mapped: mapped\r Device Legacy ID: -\r Device Binary ID: -\r Device Text ID: -\r Read Only: false\r Fenced Due to Restore: false\r Used Size: 0\r Maximum Resize Size: 502.0GB\r Creation Time: 9/10/2021 09:57:47\r Class: regular\r Node Hosting the LUN: ontap-01\r QoS Policy Group: -\r Caching Policy Name: -\r Clone: false\r Clone Autodelete Enabled: false\r Inconsistent Import: false\r \r Vserver Name: svm3\r LUN Path: /vol/svm3_lun/svm3_lun\r Volume Name: svm3_lun\r Qtree Name: ""\r LUN Name: svm3_lun\r LUN Size: 1.00GB\r OS Type: windows_2008\r Space Reservation: enabled\r Serial Number: wpEzy]RQjLqA\r Serial Number (Hex): 7770457a795d52516a4c714e\r Comment:\r Space Reservations Honored: true\r Space Allocation: disabled\r State: online\r LUN UUID: 2aa5a7ab-efbe-41f3-a4bf-dcd741e624a1\r Mapped: mapped\r Device Legacy ID: -\r Device Binary ID: -\r Device Text ID: -\r Read Only: false\r Fenced Due to Restore: false\r Used Size: 0\r Maximum Resize Size: 502.0GB\r Creation Time: 9/10/2021 09:57:47\r Class: regular\r Node Hosting the LUN: ontap-01\r QoS Policy Group: -\r Caching Policy Name: -\r Clone: false\r Clone Autodelete Enabled: false\r Inconsistent Import: false""" ================================================ FILE: delfin/tests/unit/drivers/netapp/netapp_ontap/test_netapp.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
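# NOTE: the driver under test never reaches a real array. Class-level mocks stub
# out SSHPool and NetAppHandler, and each test primes SSHPool.do_exec (and, for
# performance tests, NetAppHandler.do_rest_call) with a side_effect list that
# replays the canned CLI/REST fixtures from test_constans in call order.
# A minimal sketch of that replay pattern (illustrative only, not driver API):
#
#     probe = mock.Mock(side_effect=['first', 'second'])
#     probe()  # -> 'first'
#     probe()  # -> 'second'; one more call would raise StopIteration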
from unittest import TestCase, mock

import paramiko

from delfin.tests.unit.drivers.netapp.netapp_ontap import test_constans
from delfin import context
from delfin.drivers.netapp.dataontap.netapp_handler import NetAppHandler
from delfin.drivers.netapp.dataontap.cluster_mode import NetAppCmodeDriver
from delfin.drivers.utils.ssh_client import SSHPool


class Request:
    def __init__(self):
        self.environ = {'delfin.context': context.RequestContext()}


class TestNetAppCmodeDriver(TestCase):
    SSHPool.get = mock.Mock(
        return_value={paramiko.SSHClient()})
    NetAppHandler.login = mock.Mock()
    NetAppHandler.do_rest_call = mock.Mock()
    netapp_client = NetAppCmodeDriver(**test_constans.ACCESS_INFO)

    def test_reset_connection(self):
        kwargs = test_constans.ACCESS_INFO
        NetAppHandler.login = mock.Mock()
        netapp_client = NetAppCmodeDriver(**kwargs)
        netapp_client.reset_connection(context, **kwargs)
        netapp_client.netapp_handler.do_rest_call = mock.Mock()
        self.assertEqual(netapp_client.netapp_handler.ssh_pool.ssh_host,
                         "192.168.159.130")
        self.assertEqual(netapp_client.netapp_handler.ssh_pool.ssh_port, 22)

    def test_get_storage(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.SYSTEM_INFO,
                         test_constans.VERSION,
                         test_constans.SYSTEM_STATUS,
                         test_constans.CONTROLLER_INFO,
                         test_constans.CONTROLLER_IP_INFO,
                         test_constans.DISKS_INFO,
                         test_constans.PHYSICAL_INFO,
                         test_constans.ERROR_DISK_INFO,
                         test_constans.POOLS_INFO,
                         test_constans.AGGREGATE_DETAIL_INFO])
        data = self.netapp_client.get_storage(context)
        self.assertEqual(data['vendor'], 'NetApp')

    def test_list_storage_pools(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.POOLS_INFO,
                         test_constans.AGGREGATE_DETAIL_INFO])
        data = self.netapp_client.list_storage_pools(context)
        self.assertEqual(data[0]['name'], 'aggr0')

    def test_list_volumes(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.LUN_INFO,
                         test_constans.FS_INFO,
                         test_constans.THIN_FS_INFO,
                         test_constans.POOLS_INFO,
                         test_constans.AGGREGATE_DETAIL_INFO])
        data = self.netapp_client.list_volumes(context)
        self.assertEqual(data[0]['name'], 'lun_0')

    def test_list_alerts(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.ALERT_INFO])
        data = self.netapp_client.list_alerts(context)
        self.assertEqual(data[0]['alert_name'], 'DualPathToDiskShelf_Alert')

    def test_clear_alerts(self):
        alert = {'alert_id': '123'}
        SSHPool.do_exec = mock.Mock()
        self.netapp_client.clear_alert(context, alert)

    def test_parse_alert(self):
        data = self.netapp_client.parse_alert(context,
                                              test_constans.TRAP_MAP)
        self.assertEqual(data['alert_name'], 'DisabledInuseSASPort_Alert')

    def test_list_controllers(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.CONTROLLER_INFO,
                         test_constans.CONTROLLER_IP_INFO])
        data = self.netapp_client.list_controllers(context)
        self.assertEqual(data[0]['name'], 'cl-01')

    def test_list_ports(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.FC_PORT_INFO,
                         test_constans.PORTS_INFO])
        data = self.netapp_client.list_ports(context)
        self.assertEqual(data[0]['name'], 'cl-01:0a')

    def test_list_disks(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.DISKS_INFO,
                         test_constans.PHYSICAL_INFO,
                         test_constans.ERROR_DISK_INFO])
        data = self.netapp_client.list_disks(context)
        self.assertEqual(data[0]['name'], 'NET-1.1')

    def test_list_qtrees(self):
        SSHPool.do_exec = mock.Mock(side_effect=[
            test_constans.QTREES_INFO, test_constans.FS_INFO])
        data = self.netapp_client.list_qtrees(context)
        self.assertEqual(data[0]['security_mode'], 'ntfs')

    def test_list_shares(self):
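        # list_shares() is expected to drain six fixtures in the order mocked
        # below: qtree and filesystem context first, then the allowed-protocols
        # table, the CIFS vserver and share listings, and finally the NFS
        # volume dump (ordering inferred from the side_effect list, not from
        # the driver's documentation).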
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.QTREES_INFO,
                         test_constans.FS_INFO,
                         test_constans.SHARES_AGREEMENT_INFO,
                         test_constans.SHARE_VSERVER_INFO,
                         test_constans.SHARES_INFO,
                         test_constans.NFS_SHARE_INFO])
        data = self.netapp_client.list_shares(context)
        self.assertEqual(data[0]['name'], 'admin$')

    def test_list_filesystems(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.FS_INFO,
                         test_constans.THIN_FS_INFO,
                         test_constans.POOLS_INFO,
                         test_constans.AGGREGATE_DETAIL_INFO])
        data = self.netapp_client.list_filesystems(context)
        self.assertEqual(data[0]['name'], 'vol0')

    def test_list_quotas(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.QUOTAS_INFO])
        data = self.netapp_client.list_quotas(context)
        self.assertEqual(data[0]['file_soft_limit'], 1000)

    def test_get_alert_sources(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.CLUSTER_IPS_INFO,
                         test_constans.CONTROLLER_INFO,
                         test_constans.CONTROLLER_IP_INFO])
        data = self.netapp_client.get_alert_sources(context)
        self.assertEqual(data[0]['host'], '8.44.162.245')

    def test_get_storage_performance(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[
                # storage
                test_constans.SYSTEM_INFO,
                # pool
                test_constans.AGGREGATE_DETAIL_INFO,
                # volume
                test_constans.LUN_INFO,
            ])
        self.netapp_client.netapp_handler.do_rest_call = mock.Mock(
            side_effect=[
                # storage
                test_constans.CLUSTER_PER_INFO,
                # pool
                test_constans.POOL_PER_INFO,
                test_constans.POOL_PER_INFO,
                test_constans.POOL_PER_INFO,
                # volume
                test_constans.LUN_PER_INFO,
                # port
                test_constans.PORT_REST_INFO,
                test_constans.FC_PER_INFO,
                test_constans.PORT_REST_INFO,
                test_constans.ETH_PER_INFO,
                # fs
                test_constans.FS_REST_INFO,
                test_constans.FS_PER_INFO,
            ])
        data = self.netapp_client.collect_perf_metrics(
            context, test_constans.ACCESS_INFO['storage_id'],
            test_constans.RESOURCE_METRICS,
            start_time=str(1435214300000),
            end_time=str(1495315500000))
        self.assertEqual(data[0][2][1485343200000], 1000)

    def test_get_capabilities_is_None(self):
        data = self.netapp_client.get_capabilities(context, None)
        self.assertEqual(data[9.8]['resource_metrics']['storage']
                         ['throughput']['unit'], 'MB/s')

    def test_get_capabilities(self):
        data = self.netapp_client.\
            get_capabilities(context,
                             {'firmware_version': 'NetApp Release 9.8R15'})
        self.assertEqual(data['resource_metrics']['storage']
                         ['throughput']['unit'], 'MB/s')

    def test_list_storage_host_initiators(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.ISCSI_INITIATOR_INFO,
                         test_constans.FC_INITIATOR_INFO,
                         test_constans.HOSTS_INFO])
        data = self.netapp_client.list_storage_host_initiators(context)
        self.assertEqual(data[0]['name'], '20:01:00:0c:29:bf:c4:d7')

    def test_list_port_groups(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.PORT_SET_INFO,
                         test_constans.LIF_INFO])
        data = self.netapp_client.list_port_groups(context)
        self.assertEqual(data['port_groups'][0]['name'], 'portgroup')

    def test_list_storage_hosts(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.HOSTS_INFO])
        data = self.netapp_client.list_storage_hosts(context)
        self.assertEqual(data[0]['name'], 'fcstart1')

    def test_list_masking_views(self):
        SSHPool.do_exec = mock.Mock(
            side_effect=[test_constans.LUN_MAPPING_INFO,
                         test_constans.MAPPING_LUN_INFO,
                         test_constans.HOSTS_INFO])
        data = self.netapp_client.list_masking_views(context)
        self.assertEqual(data[0]['name'], 'fcstart1_lun_1')

    def test_get_latest_perf_timestamp(self):
        self.netapp_client.netapp_handler.do_rest_call = mock.Mock(
            side_effect=[test_constans.CLUSTER_PER_INFO])
        data = self.netapp_client.get_latest_perf_timestamp(context)
        self.assertEqual(data, 1485343200000)
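
# For reference: 1485343200000 ms is 2017-01-25T11:20:00Z, the timestamp carried
# by the CLUSTER_PER_INFO fixture, so the two performance tests above are in
# effect asserting that the handler converts the REST ISO-8601 timestamp into
# epoch milliseconds.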
================================================
FILE: delfin/tests/unit/drivers/pure/__init__.py
================================================

================================================
FILE: delfin/tests/unit/drivers/pure/flasharray/__init__.py
================================================

================================================
FILE: delfin/tests/unit/drivers/pure/flasharray/test_pure_flasharray.py
================================================
# Copyright 2022 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# import time
from unittest import TestCase, mock

import six
from oslo_log import log
# from oslo_utils import units

from delfin.common import constants
from delfin.drivers.pure.flasharray import consts

# delfin.cryptor must be stubbed before the rest_handler import below pulls it in.
sys.modules['delfin.cryptor'] = mock.Mock()
from delfin import context
from delfin.drivers.pure.flasharray.rest_handler import RestHandler
from delfin.drivers.pure.flasharray.pure_flasharray import PureFlashArrayDriver

LOG = log.getLogger(__name__)

ACCESS_INFO = {
    "storage_id": "12345",
    "rest": {
        "host": "10.0.0.1",
        "port": 8443,
        "username": "user",
        "password": "pass"
    }
}
volumes_info = [
    {
        "total": 116272464547,
        "name": "oracl_ail",
        "system": "",
        "snapshots": 0,
        "volumes": 116272464547,
        "data_reduction": 1.82656654775252,
        "size": 2156324555567,
        "shared_space": "",
        "thin_provisioning": 0.9225557589632,
        "total_reduction": 18.92245232244555
    },
    {
        "total": 0,
        "name": "wxt1",
        "system": "",
        "snapshots": 0,
        "volumes": 0,
        "data_reduction": 1,
        "size": 1073741824,
        "shared_space": "",
        "thin_provisioning": 1,
        "total_reduction": 1
    }
]
pool_info = [
    {
        "name": "lktest",
        "volumes": [
            "oracl_ail",
            "wxt1",
            "lktest/lk301",
            "lktest/lk401",
            "lktest/lk501",
        ]
    },
    {
        "name": "ethanTestVG",
        "volumes": []
    }
]
volume_info = {
    "created": "2016-05-02T20:36:20Z",
    "name": "oracl_ail",
    "serial": "Fedd3455666y",
    "size": 1073740124,
    "source": ""
}
volume_info_two = {
    "created": "2016-05-02T20:36:20Z",
    "name": "wxt1",
    "serial": "Fedd3475666y",
    "size": 1073740124,
    "source": ""
}
storage_info = [
    {
        "parity": "0.996586544522471235",
        "provisioned": "20869257625600",
        "hostname": "FA-m20",
        "system": 0,
        "snapshots": 0,
        "volumes": 227546215656,
        "data_reduction": 1,
        "capacity": 122276719419392,
        "total": 324829845504,
        "shared_space": 97544451659,
        "thin_provisioning": 0.9526445631455244,
        "total_reduction": 64.152236458789225
    }
]
storage_id_info = {
    "array_name": "pure01",
    "id": "dlmkk15xcfdf4v5",
    "revision": "2016-20-29mfmkkk",
    "version": "4.6.7"
}
alerts_info = [
    {
        "category": "array",
        "code": 42,
        "actual": "",
        "opened": "2018-05-12T10:55:21Z",
        "component_type": "hardware",
        "event": "failure",
        "current_severity": "warning",
        "details": "",
        "expected": "",
        "id": 135,
        "component_name": "ct1.eth0"
    },
    {
        "category": "array",
        "code": 13,
        "actual": "",
        "opened": "2018-05-12T10:55:21Z",
        "component_type": "process",
        "event": "server unreachable",
        "current_severity": "warning",
        "details": "",
        "expected": "",
        "id":
10088786, "component_name": "ct1.ntpd" } ] parse_alert_info = { '1.3.6.1.2.1.1.3.0': '30007589', '1.3.6.1.4.1.40482.3.7': '2', '1.3.6.1.4.1.40482.3.6': 'server error', '1.3.6.1.4.1.40482.3.3': 'cto', '1.3.6.1.4.1.40482.3.5': 'cto.server error' } controllers_info = [ { "status": "ready", "name": "CT0", "version": "5.3.0", "mode": "primary", "model": "FA-m20r2", "type": "array_controller" }, { "status": "ready", "name": "CT1", "version": "5.3.0", "mode": "secondary", "model": "FA-m20r2", "type": "array_controller" } ] hardware_info = [ { "details": "", "identify": "off", "index": 0, "name": "CTO.FC1", "slot": "", "speed": 0, "status": "ok", "temperature": "" }, { "details": "", "identify": "", "index": 0, "name": "CTO.ETH15", "slot": 0, "speed": 1000000, "status": "ok", "temperature": "" } ] drive_info = [ { "status": "healthy", "protocol": "SAS", "name": "CH0.BAY1", "last_evac_completed": "1970-01-01T00:00:00Z", "details": "", "capacity": 1027895542547, "type": "SSD", "last_failure": "1970-01-01T00:00:00Z" }, { "status": "healthy", "protocol": "SAS", "name": "CH0.BAY2", "last_evac_completed": "1970-01-01T00:00:00Z", "details": "", "capacity": 1027895542547, "type": "SSD", "last_failure": "1970-01-01T00:00:00Z" }, { "status": "healthy", "protocol": "SAS", "name": "CH0.BAY3", "last_evac_completed": "1970-01-01T00:00:00Z", "details": "", "capacity": 1027895542547, "type": "SSD", "last_failure": "1970-01-01T00:00:00Z" } ] port_info = [ { "name": "CTO.FC1", "failover": "", "iqn": "iqn.2016-11-01.com.pure", "portal": "100.12.253.23:4563", "wwn": "43ddff45ggg4rty", "nqn": "" }, { "name": "CTO.ETH15", "failover": "", "iqn": "iqn.2016-11-01.com.pure", "portal": "100.12.253.23:4563", "wwn": None, "nqn": None } ] port_network_info = [ { "name": "CTO.FC1", "address": "45233662jksndj", "speed": 12000, "netmask": "100.12.253.23:4563", "wwn": "43ddff45ggg4rty", "nqn": None, "services": [ "management" ] }, { "name": "CTO.ETH15", "address": "45233662jksndj", "speed": 13000, "netmask": "100.12.253.23:4563", "wwn": None, "nqn": None, "services": [ "management" ] } ] pools_info = [ { "total": "", "name": "lktest", "snapshots": "", "volumes": 0, "data_reduction": 1, "size": 5632155322, "thin_provisioning": 1, "total_reduction": 1 }, { "total": "", "name": "ethanTestVG", "snapshots": "", "volumes": 0, "data_reduction": 1, "size": 5632155322, "thin_provisioning": 1, "total_reduction": 1 } ] reset_connection_info = { "username": "username", "status": 200 } hosts_info = [ { "iqn": [ "iqn.1996-04.de.suse:01:ca9f3bcaf47" ], "wwn": [], "nqn": [], "name": "host", "hgroup": "HGTest" }, { "iqn": [], "wwn": [], "nqn": [], "name": "wxth", "hgroup": None }, { "iqn": [ "iqn.1991-05.com.microsoft:win3" ], "wwn": [], "nqn": [], "name": "huhuitest", "hgroup": None }, { "iqn": [], "wwn": [], "nqn": [], "name": "testGroup", "hgroup": None }, { "iqn": [], "wwn": [ "21000024FF2C9524", "21000024FF2C9525" ], "nqn": [], "name": "windows223", "hgroup": None }, { "iqn": [], "wwn": [ "10000000C9D5BC06", "10000000C9D5BC07" ], "nqn": [], "name": "CL-B06-RH2288HV3-8-44-157-33", "hgroup": None }, { "iqn": [], "wwn": [ "21000024FF76D0CC", "21000024FF76D0CD" ], "nqn": [], "name": "CL-C21-RH5885HV3-8-44-165-22", "hgroup": None }, { "iqn": [ "iqn.1996-04.de.suse:01:66bf70288332" ], "wwn": [], "nqn": [], "name": "test-1s", "hgroup": None }, { "iqn": [], "wwn": [], "nqn": [], "name": "rhev125", "hgroup": None }, { "iqn": [], "wwn": [ "210034800D6E7ADE", "210034800D6E7ADF" ], "nqn": [], "name": "QIB", "hgroup": "QIB" }, { "iqn": [], "wwn": [ 
"20090002D2937E9F", "20190002D2937E9F" ], "nqn": [], "name": "v6-8-44-128-21", "hgroup": None }, { "iqn": [ "iqn.1994-05.com.redhat:1a9eaa70b558" ], "wwn": [], "nqn": [], "name": "host135", "hgroup": None }, { "iqn": [], "wwn": [ "2200CC05777C3EDF", "2210CC05777C3EDF" ], "nqn": [], "name": "zty-doradoV6", "hgroup": None }, { "iqn": [ "iqn.1994-05.com.redhat:71cfb5b97df" ], "wwn": [ "21000024FF76D0CF" ], "nqn": [], "name": "CL-Test1", "hgroup": None }, { "iqn": [ "iqn.1994-05.com.redhat:80c412848b94" ], "wwn": [], "nqn": [], "name": "host137", "hgroup": None }, { "iqn": [], "wwn": [], "nqn": [], "name": "hsesxi", "hgroup": None }, { "iqn": [], "wwn": [ "21000024FF40272A", "21000024FF40272B" ], "nqn": [], "name": "zty-windows", "hgroup": None }, { "iqn": [], "wwn": [], "nqn": [], "name": "hosttest", "hgroup": "HGTest" }, { "iqn": [], "wwn": [ "21000024FF5351F0", "21000024FF5351F1" ], "nqn": [], "name": "hswin41", "hgroup": None }, { "iqn": [], "wwn": [], "nqn": [], "name": "ztj201", "hgroup": None }, { "iqn": [], "wwn": [], "nqn": [], "name": "test123", "hgroup": None }, { "iqn": [], "wwn": [], "nqn": [], "name": "zsytest", "hgroup": None }, { "iqn": [], "wwn": [], "nqn": [ "nqn.2021-12.org.nvmexpress.mytest" ], "name": "zhilong-host0000002130", "hgroup": None } ] HOSTS_PERSONALITY_INFO = [ { "name": "host", "personality": None }, { "name": "wxth", "personality": None }, { "name": "huhuitest", "personality": None }, { "name": "testGroup", "personality": None }, { "name": "windows223", "personality": None }, { "name": "CL-B06-RH2288HV3-8-44-157-33", "personality": None }, { "name": "CL-C21-RH5885HV3-8-44-165-22", "personality": None }, { "name": "test-1s", "personality": None }, { "name": "rhev125", "personality": None }, { "name": "QIB", "personality": None }, { "name": "v6-8-44-128-21", "personality": None }, { "name": "host135", "personality": None }, { "name": "zty-doradoV6", "personality": None }, { "name": "CL-Test1", "personality": None }, { "name": "host137", "personality": None }, { "name": "hsesxi", "personality": None }, { "name": "zty-windows", "personality": None }, { "name": "hosttest", "personality": None }, { "name": "hswin41", "personality": None }, { "name": "ztj201", "personality": None }, { "name": "test123", "personality": None }, { "name": "zsytest", "personality": None }, { "name": "zhilong-host0000002130", "personality": "aix" } ] HGROUP_INFO = [ { "hosts": [], "name": "podgroup" }, { "hosts": [], "name": "NewTest" }, { "hosts": [ "QIB" ], "name": "QIB" }, { "hosts": [ "host", "hosttest" ], "name": "HGTest" } ] VOLUME_GROUP_INFO = [ { "name": "vvol-pure-VM1-072e131e-vg", "volumes": [] }, { "name": "vvol-pure-vm2-e48a0ef8-vg", "volumes": [] }, { "name": "vvol-pure-vm3-65d42a4e-vg", "volumes": [] }, { "name": "vvol-pure-vm4-17c41971-vg", "volumes": [] }, { "name": "Volume-Group", "volumes": [ "Volume-Group/voltest001", "Volume-Group/voltest002", "Volume-Group/voltest003", "Volume-Group/voltest004", "Volume-Group/voltest005" ] }, { "name": "test1", "volumes": [] }, { "name": "tangxuan", "volumes": [] } ] HOSTS_CONNECT_INFO = [ { "vol": "huhuitest", "name": "huhuitest", "lun": 1, "hgroup": None }, { "vol": "test", "name": "wxth", "lun": 1, "hgroup": None }, { "vol": "test", "name": "testGroup", "lun": 1, "hgroup": None }, { "vol": "win2016_223", "name": "windows223", "lun": 1, "hgroup": None }, { "vol": "pure-protocol-endpoint", "name": "CL-C21-RH5885HV3-8-44-165-22", "lun": 1, "hgroup": None }, { "vol": "CL_VOLUME_1_remote", "name": "CL-C21-RH5885HV3-8-44-165-22", "lun": 
2, "hgroup": None }, { "vol": "lun-test1s", "name": "test-1s", "lun": 1, "hgroup": None }, { "vol": "QIB1", "name": "QIB", "lun": 254, "hgroup": "QIB" }, { "vol": "QIB1", "name": "zty-windows", "lun": 254, "hgroup": "QIB" }, { "vol": "QIB2", "name": "zty-windows", "lun": 253, "hgroup": "QIB" }, { "vol": "QIB2", "name": "QIB", "lun": 253, "hgroup": "QIB" }, { "vol": "yzw_iotest", "name": "host135", "lun": 2, "hgroup": None }, { "vol": "homelab-pso-db_0000000003", "name": "host137", "lun": 3, "hgroup": None }, { "vol": "homelab-pso-db_0000000009", "name": "host135", "lun": 3, "hgroup": None }, { "vol": "homelab-pso-db_0000000012", "name": "host135", "lun": 6, "hgroup": None }, { "vol": "v6-8-44-128-21", "name": "v6-8-44-128-21", "lun": 1, "hgroup": None }, { "vol": "V6-8-44-128-21-002", "name": "v6-8-44-128-21", "lun": 2, "hgroup": None }, { "vol": "homelab-pso-db_0000000007", "name": "host137", "lun": 7, "hgroup": None }, { "vol": "homelab-pso-db_0000000010", "name": "host135", "lun": 4, "hgroup": None }, { "vol": "homelab-pso-db_0000000013", "name": "host137", "lun": 2, "hgroup": None }, { "vol": "homelab-pso-db_0000000000", "name": "host135", "lun": 5, "hgroup": None }, { "vol": "homelab-pso-db_0000000001", "name": "host137", "lun": 4, "hgroup": None }, { "vol": "homelab-pso-db_0000000016", "name": "host137", "lun": 5, "hgroup": None }, { "vol": "homelab-pso-db_0000000018", "name": "host135", "lun": 7, "hgroup": None }, { "vol": "homelab-pso-db_0000000015", "name": "host135", "lun": 8, "hgroup": None }, { "vol": "homelab-pso-db_0000000020", "name": "host137", "lun": 6, "hgroup": None }, { "vol": "homelab-pso-db_0000000021", "name": "host135", "lun": 9, "hgroup": None }, { "vol": "homelab-pso-db_0000000022", "name": "host137", "lun": 8, "hgroup": None }, { "vol": "homelab-pso-db_0000000019", "name": "host135", "lun": 10, "hgroup": None }, { "vol": "homelab-pso-db_0000000026", "name": "host137", "lun": 9, "hgroup": None }, { "vol": "homelab-pso-db_0000000028", "name": "host135", "lun": 11, "hgroup": None }, { "vol": "homelab-pso-db_0000000024", "name": "host137", "lun": 10, "hgroup": None }, { "vol": "hsboot", "name": "hsesxi", "lun": 1, "hgroup": None }, { "vol": "hszdata", "name": "hsesxi", "lun": 2, "hgroup": None }, { "vol": "zty_lun16", "name": "zty-doradoV6", "lun": 1, "hgroup": None }, { "vol": "zty_lun15", "name": "zty-doradoV6", "lun": 2, "hgroup": None }, { "vol": "zty_lun13", "name": "zty-doradoV6", "lun": 3, "hgroup": None }, { "vol": "zty_lun11", "name": "zty-doradoV6", "lun": 4, "hgroup": None }, { "vol": "zty_lun14", "name": "zty-doradoV6", "lun": 5, "hgroup": None }, { "vol": "zty_lun2", "name": "zty-doradoV6", "lun": 6, "hgroup": None }, { "vol": "zty_lun5", "name": "zty-doradoV6", "lun": 7, "hgroup": None }, { "vol": "zty_lun4", "name": "zty-doradoV6", "lun": 8, "hgroup": None }, { "vol": "zty_lun1", "name": "zty-doradoV6", "lun": 9, "hgroup": None }, { "vol": "zty_lun3", "name": "zty-doradoV6", "lun": 10, "hgroup": None }, { "vol": "zty_lun6", "name": "zty-doradoV6", "lun": 11, "hgroup": None }, { "vol": "zty_lun12", "name": "zty-doradoV6", "lun": 12, "hgroup": None }, { "vol": "zty_lun10", "name": "zty-doradoV6", "lun": 13, "hgroup": None }, { "vol": "zty_lun8", "name": "zty-doradoV6", "lun": 14, "hgroup": None }, { "vol": "zty_lun7", "name": "zty-doradoV6", "lun": 15, "hgroup": None }, { "vol": "zty_lun9", "name": "zty-doradoV6", "lun": 16, "hgroup": None }, { "vol": "Volume-Group/voltest005", "name": "hosttest", "lun": 254, "hgroup": "HGTest" }, { "vol": 
"Volume-Group/voltest005", "name": "host", "lun": 254, "hgroup": "HGTest" }, { "vol": "Volume-Group/voltest001", "name": "host", "lun": 253, "hgroup": "HGTest" }, { "vol": "Volume-Group/voltest001", "name": "hosttest", "lun": 253, "hgroup": "HGTest" }, { "vol": "Volume-Group/voltest002", "name": "host", "lun": 252, "hgroup": "HGTest" }, { "vol": "Volume-Group/voltest002", "name": "hosttest", "lun": 252, "hgroup": "HGTest" }, { "vol": "Volume-Group/voltest003", "name": "host", "lun": 251, "hgroup": "HGTest" }, { "vol": "Volume-Group/voltest003", "name": "hosttest", "lun": 251, "hgroup": "HGTest" }, { "vol": "Volume-Group/voltest004", "name": "hosttest", "lun": 250, "hgroup": "HGTest" }, { "vol": "Volume-Group/voltest004", "name": "host", "lun": 250, "hgroup": "HGTest" }, { "vol": "homelab-pso-db_0000000001-u", "name": "CL-B06-RH2288HV3-8-44-157-33", "lun": 4, "hgroup": None }, { "vol": "Volume-Group/voltest001", "name": "CL-B06-RH2288HV3-8-44-157-33", "lun": 1, "hgroup": None }, { "vol": "hswin4102", "name": "zhilong-host0000002130", "lun": 1, "hgroup": None }, { "vol": "tangxuan/tt001", "name": "host135", "lun": 1, "hgroup": None }, { "vol": "hswin", "name": "CL-Test1", "lun": 1, "hgroup": None }, { "vol": "homelab-pso-db_0000000000-u", "name": "zhilong-host0000002130", "lun": 2, "hgroup": None }, { "vol": "nc::136_connect", "name": "hosttest", "lun": 1, "hgroup": None } ] HGROUP_CONNECT_INFO = [ { "vol": "QIB1", "name": "QIB", "lun": 254 }, { "vol": "QIB2", "name": "QIB", "lun": 253 }, { "vol": "Volume-Group/voltest005", "name": "HGTest", "lun": 254 }, { "vol": "Volume-Group/voltest001", "name": "HGTest", "lun": 253 }, { "vol": "Volume-Group/voltest002", "name": "HGTest", "lun": 252 }, { "vol": "Volume-Group/voltest003", "name": "HGTest", "lun": 251 }, { "vol": "Volume-Group/voltest004", "name": "HGTest", "lun": 250 }, { "vol": "homelab-pso-db_0000000002", "name": "NewTest", "lun": 254 }, { "vol": "yzw_test0", "name": "zhilong-hg", "lun": 254 } ] volume_data = [ {'native_volume_id': 'oracl_ail', 'name': 'oracl_ail', 'total_capacity': 2156324555567, 'used_capacity': 116272464547, 'free_capacity': 2040052091020, 'storage_id': '12345', 'status': 'normal', 'type': 'thin'}, {'native_volume_id': 'wxt1', 'name': 'wxt1', 'total_capacity': 1073741824, 'used_capacity': 0, 'free_capacity': 1073741824, 'storage_id': '12345', 'status': 'normal', 'type': 'thin'}] storage_data = { 'model': 'FA-m20r2', 'total_capacity': 122276719419392, 'raw_capacity': 3083686627641, 'used_capacity': 324829845504, 'free_capacity': 121951889573888, 'vendor': 'PURE', 'name': 'pure01', 'serial_number': 'dlmkk15xcfdf4v5', 'firmware_version': '4.6.7', 'status': 'normal'} list_alert_data = [ {'occur_time': 1526122521000, 'alert_id': 135, 'severity': 'Warning', 'category': 'Fault', 'location': 'ct1.eth0', 'type': 'EquipmentAlarm', 'resource_type': 'Storage', 'alert_name': 'failure', 'match_key': '7f1de29e6da19d22b51c68001e7e0e54', 'description': '(hardware:ct1.eth0): failure'}, {'occur_time': 1526122521000, 'alert_id': 10088786, 'severity': 'Warning', 'category': 'Fault', 'location': 'ct1.ntpd', 'type': 'EquipmentAlarm', 'resource_type': 'Storage', 'alert_name': 'server unreachable', 'match_key': 'b35a0c63d4cd82256b95f51522c6ba32', 'description': '(process:ct1.ntpd): server unreachable'}] parse_alert_data = { 'alert_id': '30007589', 'severity': 'Informational', 'category': 'Fault', 'occur_time': 1644833673861, 'description': '(None:cto): server error', 'location': 'cto', 'type': 'EquipmentAlarm', 'resource_type': 'Storage', 
'alert_name': 'cto.server error', 'sequence_number': '30007589', 'match_key': '11214c87bb6efcf8dc2aed1095271774'} controllers_data = [ {'name': 'CT0', 'status': 'unknown', 'soft_version': '5.3.0', 'storage_id': '12345', 'native_controller_id': 'CT0', 'location': 'CT0'}, {'name': 'CT1', 'status': 'unknown', 'soft_version': '5.3.0', 'storage_id': '12345', 'native_controller_id': 'CT1', 'location': 'CT1'}] disk_data = [ {'name': 'CH0.BAY1', 'physical_type': 'ssd', 'status': 'normal', 'storage_id': '12345', 'capacity': 1027895542547, 'speed': None, 'model': None, 'serial_number': None, 'native_disk_id': 'CH0.BAY1', 'location': 'CH0.BAY1', 'manufacturer': 'PURE', 'firmware': ''}, {'name': 'CH0.BAY2', 'physical_type': 'ssd', 'status': 'normal', 'storage_id': '12345', 'capacity': 1027895542547, 'speed': None, 'model': None, 'serial_number': None, 'native_disk_id': 'CH0.BAY2', 'location': 'CH0.BAY2', 'manufacturer': 'PURE', 'firmware': ''}, {'name': 'CH0.BAY3', 'physical_type': 'ssd', 'status': 'normal', 'storage_id': '12345', 'capacity': 1027895542547, 'speed': None, 'model': None, 'serial_number': None, 'native_disk_id': 'CH0.BAY3', 'location': 'CH0.BAY3', 'manufacturer': 'PURE', 'firmware': ''}] port_data = [ {'type': 'fc', 'name': 'CTO.FC1', 'native_port_id': 'CTO.FC1', 'storage_id': '12345', 'location': 'CTO.FC1', 'connection_status': 'disconnected', 'speed': 0, 'health_status': 'normal', 'wwn': '43:dd:ff:45:gg:g4:rt:y', 'mac_address': None, 'logical_type': 'management', 'ipv4_mask': '100.12.253.23:4563', 'ipv4': '45233662jksndj'}, {'type': 'eth', 'name': 'CTO.ETH15', 'native_port_id': 'CTO.ETH15', 'storage_id': '12345', 'location': 'CTO.ETH15', 'connection_status': 'connected', 'speed': 1000000, 'health_status': 'normal', 'wwn': 'iqn.2016-11-01.com.pure', 'mac_address': None, 'logical_type': 'management', 'ipv4_mask': '100.12.253.23:4563', 'ipv4': '45233662jksndj'}] initiator_data = [ {'native_storage_host_initiator_id': 'iqn.1996-04.de.suse:01:ca9f3bcaf47', 'native_storage_host_id': 'host', 'name': 'iqn.1996-04.de.suse:01:ca9f3bcaf47', 'type': 'iscsi', 'status': 'unknown', 'wwn': 'iqn.1996-04.de.suse:01:ca9f3bcaf47', 'storage_id': '12345'}, {'native_storage_host_initiator_id': 'iqn.1991-05.com.microsoft:win3', 'native_storage_host_id': 'huhuitest', 'name': 'iqn.1991-05.com.microsoft:win3', 'type': 'iscsi', 'status': 'unknown', 'wwn': 'iqn.1991-05.com.microsoft:win3', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '21:00:00:24:FF:2C:95:24', 'native_storage_host_id': 'windows223', 'name': '21:00:00:24:FF:2C:95:24', 'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:2C:95:24', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '21:00:00:24:FF:2C:95:25', 'native_storage_host_id': 'windows223', 'name': '21:00:00:24:FF:2C:95:25', 'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:2C:95:25', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '10:00:00:00:C9:D5:BC:06', 'native_storage_host_id': 'CL-B06-RH2288HV3-8-44-157-33', 'name': '10:00:00:00:C9:D5:BC:06', 'type': 'fc', 'status': 'unknown', 'wwn': '10:00:00:00:C9:D5:BC:06', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '10:00:00:00:C9:D5:BC:07', 'native_storage_host_id': 'CL-B06-RH2288HV3-8-44-157-33', 'name': '10:00:00:00:C9:D5:BC:07', 'type': 'fc', 'status': 'unknown', 'wwn': '10:00:00:00:C9:D5:BC:07', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '21:00:00:24:FF:76:D0:CC', 'native_storage_host_id': 'CL-C21-RH5885HV3-8-44-165-22', 'name': '21:00:00:24:FF:76:D0:CC', 
'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:76:D0:CC', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '21:00:00:24:FF:76:D0:CD', 'native_storage_host_id': 'CL-C21-RH5885HV3-8-44-165-22', 'name': '21:00:00:24:FF:76:D0:CD', 'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:76:D0:CD', 'storage_id': '12345'}, {'native_storage_host_initiator_id': 'iqn.1996-04.de.suse:01:66bf70288332', 'native_storage_host_id': 'test-1s', 'name': 'iqn.1996-04.de.suse:01:66bf70288332', 'type': 'iscsi', 'status': 'unknown', 'wwn': 'iqn.1996-04.de.suse:01:66bf70288332', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '21:00:34:80:0D:6E:7A:DE', 'native_storage_host_id': 'QIB', 'name': '21:00:34:80:0D:6E:7A:DE', 'type': 'fc', 'status': 'unknown', 'wwn': '21:00:34:80:0D:6E:7A:DE', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '21:00:34:80:0D:6E:7A:DF', 'native_storage_host_id': 'QIB', 'name': '21:00:34:80:0D:6E:7A:DF', 'type': 'fc', 'status': 'unknown', 'wwn': '21:00:34:80:0D:6E:7A:DF', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '20:09:00:02:D2:93:7E:9F', 'native_storage_host_id': 'v6-8-44-128-21', 'name': '20:09:00:02:D2:93:7E:9F', 'type': 'fc', 'status': 'unknown', 'wwn': '20:09:00:02:D2:93:7E:9F', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '20:19:00:02:D2:93:7E:9F', 'native_storage_host_id': 'v6-8-44-128-21', 'name': '20:19:00:02:D2:93:7E:9F', 'type': 'fc', 'status': 'unknown', 'wwn': '20:19:00:02:D2:93:7E:9F', 'storage_id': '12345'}, {'native_storage_host_initiator_id': 'iqn.1994-05.com.redhat:1a9eaa70b558', 'native_storage_host_id': 'host135', 'name': 'iqn.1994-05.com.redhat:1a9eaa70b558', 'type': 'iscsi', 'status': 'unknown', 'wwn': 'iqn.1994-05.com.redhat:1a9eaa70b558', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '22:00:CC:05:77:7C:3E:DF', 'native_storage_host_id': 'zty-doradoV6', 'name': '22:00:CC:05:77:7C:3E:DF', 'type': 'fc', 'status': 'unknown', 'wwn': '22:00:CC:05:77:7C:3E:DF', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '22:10:CC:05:77:7C:3E:DF', 'native_storage_host_id': 'zty-doradoV6', 'name': '22:10:CC:05:77:7C:3E:DF', 'type': 'fc', 'status': 'unknown', 'wwn': '22:10:CC:05:77:7C:3E:DF', 'storage_id': '12345'}, {'native_storage_host_initiator_id': 'iqn.1994-05.com.redhat:71cfb5b97df', 'native_storage_host_id': 'CL-Test1', 'name': 'iqn.1994-05.com.redhat:71cfb5b97df', 'type': 'iscsi', 'status': 'unknown', 'wwn': 'iqn.1994-05.com.redhat:71cfb5b97df', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '21:00:00:24:FF:76:D0:CF', 'native_storage_host_id': 'CL-Test1', 'name': '21:00:00:24:FF:76:D0:CF', 'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:76:D0:CF', 'storage_id': '12345'}, {'native_storage_host_initiator_id': 'iqn.1994-05.com.redhat:80c412848b94', 'native_storage_host_id': 'host137', 'name': 'iqn.1994-05.com.redhat:80c412848b94', 'type': 'iscsi', 'status': 'unknown', 'wwn': 'iqn.1994-05.com.redhat:80c412848b94', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '21:00:00:24:FF:40:27:2A', 'native_storage_host_id': 'zty-windows', 'name': '21:00:00:24:FF:40:27:2A', 'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:40:27:2A', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '21:00:00:24:FF:40:27:2B', 'native_storage_host_id': 'zty-windows', 'name': '21:00:00:24:FF:40:27:2B', 'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:40:27:2B', 'storage_id': '12345'}, {'native_storage_host_initiator_id': 
'21:00:00:24:FF:53:51:F0', 'native_storage_host_id': 'hswin41', 'name': '21:00:00:24:FF:53:51:F0', 'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:53:51:F0', 'storage_id': '12345'}, {'native_storage_host_initiator_id': '21:00:00:24:FF:53:51:F1', 'native_storage_host_id': 'hswin41', 'name': '21:00:00:24:FF:53:51:F1', 'type': 'fc', 'status': 'unknown', 'wwn': '21:00:00:24:FF:53:51:F1', 'storage_id': '12345'}, {'native_storage_host_initiator_id': 'nqn.2021-12.org.nvmexpress.mytest', 'native_storage_host_id': 'zhilong-host0000002130', 'name': 'nqn.2021-12.org.nvmexpress.mytest', 'type': 'nvme-of', 'status': 'unknown', 'wwn': 'nqn.2021-12.org.nvmexpress.mytest', 'storage_id': '12345'}] host_data = [ {'name': 'host', 'storage_id': '12345', 'native_storage_host_id': 'host', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'wxth', 'storage_id': '12345', 'native_storage_host_id': 'wxth', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'huhuitest', 'storage_id': '12345', 'native_storage_host_id': 'huhuitest', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'testGroup', 'storage_id': '12345', 'native_storage_host_id': 'testGroup', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'windows223', 'storage_id': '12345', 'native_storage_host_id': 'windows223', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'CL-B06-RH2288HV3-8-44-157-33', 'storage_id': '12345', 'native_storage_host_id': 'CL-B06-RH2288HV3-8-44-157-33', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'CL-C21-RH5885HV3-8-44-165-22', 'storage_id': '12345', 'native_storage_host_id': 'CL-C21-RH5885HV3-8-44-165-22', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'test-1s', 'storage_id': '12345', 'native_storage_host_id': 'test-1s', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'rhev125', 'storage_id': '12345', 'native_storage_host_id': 'rhev125', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'QIB', 'storage_id': '12345', 'native_storage_host_id': 'QIB', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'v6-8-44-128-21', 'storage_id': '12345', 'native_storage_host_id': 'v6-8-44-128-21', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'host135', 'storage_id': '12345', 'native_storage_host_id': 'host135', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'zty-doradoV6', 'storage_id': '12345', 'native_storage_host_id': 'zty-doradoV6', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'CL-Test1', 'storage_id': '12345', 'native_storage_host_id': 'CL-Test1', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'host137', 'storage_id': '12345', 'native_storage_host_id': 'host137', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'hsesxi', 'storage_id': '12345', 'native_storage_host_id': 'hsesxi', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'zty-windows', 'storage_id': '12345', 'native_storage_host_id': 'zty-windows', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'hosttest', 'storage_id': '12345', 'native_storage_host_id': 'hosttest', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'hswin41', 'storage_id': '12345', 'native_storage_host_id': 'hswin41', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'ztj201', 'storage_id': '12345', 'native_storage_host_id': 'ztj201', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'test123', 'storage_id': '12345', 'native_storage_host_id': 'test123', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 'zsytest', 'storage_id': '12345', 'native_storage_host_id': 'zsytest', 'os_type': 'Unknown', 'status': 'normal'}, {'name': 
'zhilong-host0000002130', 'storage_id': '12345', 'native_storage_host_id': 'zhilong-host0000002130', 'os_type': 'AIX', 'status': 'normal'}] host_group_data = { 'storage_host_groups': [ {'native_storage_host_group_id': 'podgroup', 'name': 'podgroup', 'storage_id': '12345'}, {'native_storage_host_group_id': 'NewTest', 'name': 'NewTest', 'storage_id': '12345'}, {'native_storage_host_group_id': 'QIB', 'name': 'QIB', 'storage_id': '12345'}, {'native_storage_host_group_id': 'HGTest', 'name': 'HGTest', 'storage_id': '12345'}], 'storage_host_grp_host_rels': [ {'native_storage_host_group_id': 'QIB', 'storage_id': '12345', 'native_storage_host_id': 'QIB'}, {'native_storage_host_group_id': 'HGTest', 'storage_id': '12345', 'native_storage_host_id': 'host'}, {'native_storage_host_group_id': 'HGTest', 'storage_id': '12345', 'native_storage_host_id': 'hosttest'}] } volume_group_data = { 'volume_groups': [ {'name': 'vvol-pure-VM1-072e131e-vg', 'storage_id': '12345', 'native_volume_group_id': 'vvol-pure-VM1-072e131e-vg'}, {'name': 'vvol-pure-vm2-e48a0ef8-vg', 'storage_id': '12345', 'native_volume_group_id': 'vvol-pure-vm2-e48a0ef8-vg'}, {'name': 'vvol-pure-vm3-65d42a4e-vg', 'storage_id': '12345', 'native_volume_group_id': 'vvol-pure-vm3-65d42a4e-vg'}, {'name': 'vvol-pure-vm4-17c41971-vg', 'storage_id': '12345', 'native_volume_group_id': 'vvol-pure-vm4-17c41971-vg'}, {'name': 'Volume-Group', 'storage_id': '12345', 'native_volume_group_id': 'Volume-Group'}, {'name': 'test1', 'storage_id': '12345', 'native_volume_group_id': 'test1'}, {'name': 'tangxuan', 'storage_id': '12345', 'native_volume_group_id': 'tangxuan'} ], 'vol_grp_vol_rels': [ {'storage_id': '12345', 'native_volume_group_id': 'Volume-Group', 'native_volume_id': 'Volume-Group/voltest001'}, {'storage_id': '12345', 'native_volume_group_id': 'Volume-Group', 'native_volume_id': 'Volume-Group/voltest002'}, {'storage_id': '12345', 'native_volume_group_id': 'Volume-Group', 'native_volume_id': 'Volume-Group/voltest003'}, {'storage_id': '12345', 'native_volume_group_id': 'Volume-Group', 'native_volume_id': 'Volume-Group/voltest004'}, {'storage_id': '12345', 'native_volume_group_id': 'Volume-Group', 'native_volume_id': 'Volume-Group/voltest005'}] } views_data = [ {'native_masking_view_id': 'QIBQIB1', 'name': 'QIBQIB1', 'native_storage_host_group_id': 'QIB', 'native_volume_id': 'QIB1', 'storage_id': '12345'}, {'native_masking_view_id': 'QIBQIB2', 'name': 'QIBQIB2', 'native_storage_host_group_id': 'QIB', 'native_volume_id': 'QIB2', 'storage_id': '12345'}, {'native_masking_view_id': 'HGTestVolume-Group/voltest005', 'name': 'HGTestVolume-Group/voltest005', 'native_storage_host_group_id': 'HGTest', 'native_volume_id': 'Volume-Group/voltest005', 'storage_id': '12345'}, {'native_masking_view_id': 'HGTestVolume-Group/voltest001', 'name': 'HGTestVolume-Group/voltest001', 'native_storage_host_group_id': 'HGTest', 'native_volume_id': 'Volume-Group/voltest001', 'storage_id': '12345'}, {'native_masking_view_id': 'HGTestVolume-Group/voltest002', 'name': 'HGTestVolume-Group/voltest002', 'native_storage_host_group_id': 'HGTest', 'native_volume_id': 'Volume-Group/voltest002', 'storage_id': '12345'}, {'native_masking_view_id': 'HGTestVolume-Group/voltest003', 'name': 'HGTestVolume-Group/voltest003', 'native_storage_host_group_id': 'HGTest', 'native_volume_id': 'Volume-Group/voltest003', 'storage_id': '12345'}, {'native_masking_view_id': 'HGTestVolume-Group/voltest004', 'name': 'HGTestVolume-Group/voltest004', 'native_storage_host_group_id': 'HGTest', 'native_volume_id': 
'Volume-Group/voltest004', 'storage_id': '12345'}, {'native_masking_view_id': 'NewTesthomelab-pso-db_0000000002', 'name': 'NewTesthomelab-pso-db_0000000002', 'native_storage_host_group_id': 'NewTest', 'native_volume_id': 'homelab-pso-db_0000000002', 'storage_id': '12345'}, {'native_masking_view_id': 'zhilong-hgyzw_test0', 'name': 'zhilong-hgyzw_test0', 'native_storage_host_group_id': 'zhilong-hg', 'native_volume_id': 'yzw_test0', 'storage_id': '12345'}, {'native_masking_view_id': 'huhuitestNonehuhuitest', 'name': 'huhuitestNonehuhuitest', 'native_storage_host_id': 'huhuitest', 'native_volume_id': 'huhuitest', 'storage_id': '12345'}, {'native_masking_view_id': 'wxthNonetest', 'name': 'wxthNonetest', 'native_storage_host_id': 'wxth', 'native_volume_id': 'test', 'storage_id': '12345'}, {'native_masking_view_id': 'testGroupNonetest', 'name': 'testGroupNonetest', 'native_storage_host_id': 'testGroup', 'native_volume_id': 'test', 'storage_id': '12345'}, {'native_masking_view_id': 'windows223Nonewin2016_223', 'name': 'windows223Nonewin2016_223', 'native_storage_host_id': 'windows223', 'native_volume_id': 'win2016_223', 'storage_id': '12345'}, { 'native_masking_view_id': 'CL-C21-RH5885HV3-8-44-165-22Nonepure-protocol-endpoint', 'name': 'CL-C21-RH5885HV3-8-44-165-22Nonepure-protocol-endpoint', 'native_storage_host_id': 'CL-C21-RH5885HV3-8-44-165-22', 'native_volume_id': 'pure-protocol-endpoint', 'storage_id': '12345'}, { 'native_masking_view_id': 'CL-C21-RH5885HV3-8-44-165-22NoneCL_VOLUME_1_remote', 'name': 'CL-C21-RH5885HV3-8-44-165-22NoneCL_VOLUME_1_remote', 'native_storage_host_id': 'CL-C21-RH5885HV3-8-44-165-22', 'native_volume_id': 'CL_VOLUME_1_remote', 'storage_id': '12345'}, {'native_masking_view_id': 'test-1sNonelun-test1s', 'name': 'test-1sNonelun-test1s', 'native_storage_host_id': 'test-1s', 'native_volume_id': 'lun-test1s', 'storage_id': '12345'}, {'native_masking_view_id': 'host135Noneyzw_iotest', 'name': 'host135Noneyzw_iotest', 'native_storage_host_id': 'host135', 'native_volume_id': 'yzw_iotest', 'storage_id': '12345'}, {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000003', 'name': 'host137Nonehomelab-pso-db_0000000003', 'native_storage_host_id': 'host137', 'native_volume_id': 'homelab-pso-db_0000000003', 'storage_id': '12345'}, {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000009', 'name': 'host135Nonehomelab-pso-db_0000000009', 'native_storage_host_id': 'host135', 'native_volume_id': 'homelab-pso-db_0000000009', 'storage_id': '12345'}, {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000012', 'name': 'host135Nonehomelab-pso-db_0000000012', 'native_storage_host_id': 'host135', 'native_volume_id': 'homelab-pso-db_0000000012', 'storage_id': '12345'}, {'native_masking_view_id': 'v6-8-44-128-21Nonev6-8-44-128-21', 'name': 'v6-8-44-128-21Nonev6-8-44-128-21', 'native_storage_host_id': 'v6-8-44-128-21', 'native_volume_id': 'v6-8-44-128-21', 'storage_id': '12345'}, {'native_masking_view_id': 'v6-8-44-128-21NoneV6-8-44-128-21-002', 'name': 'v6-8-44-128-21NoneV6-8-44-128-21-002', 'native_storage_host_id': 'v6-8-44-128-21', 'native_volume_id': 'V6-8-44-128-21-002', 'storage_id': '12345'}, {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000007', 'name': 'host137Nonehomelab-pso-db_0000000007', 'native_storage_host_id': 'host137', 'native_volume_id': 'homelab-pso-db_0000000007', 'storage_id': '12345'}, {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000010', 'name': 'host135Nonehomelab-pso-db_0000000010', 'native_storage_host_id': 'host135', 
'native_volume_id': 'homelab-pso-db_0000000010', 'storage_id': '12345'}, {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000013', 'name': 'host137Nonehomelab-pso-db_0000000013', 'native_storage_host_id': 'host137', 'native_volume_id': 'homelab-pso-db_0000000013', 'storage_id': '12345'}, {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000000', 'name': 'host135Nonehomelab-pso-db_0000000000', 'native_storage_host_id': 'host135', 'native_volume_id': 'homelab-pso-db_0000000000', 'storage_id': '12345'}, {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000001', 'name': 'host137Nonehomelab-pso-db_0000000001', 'native_storage_host_id': 'host137', 'native_volume_id': 'homelab-pso-db_0000000001', 'storage_id': '12345'}, {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000016', 'name': 'host137Nonehomelab-pso-db_0000000016', 'native_storage_host_id': 'host137', 'native_volume_id': 'homelab-pso-db_0000000016', 'storage_id': '12345'}, {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000018', 'name': 'host135Nonehomelab-pso-db_0000000018', 'native_storage_host_id': 'host135', 'native_volume_id': 'homelab-pso-db_0000000018', 'storage_id': '12345'}, {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000015', 'name': 'host135Nonehomelab-pso-db_0000000015', 'native_storage_host_id': 'host135', 'native_volume_id': 'homelab-pso-db_0000000015', 'storage_id': '12345'}, {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000020', 'name': 'host137Nonehomelab-pso-db_0000000020', 'native_storage_host_id': 'host137', 'native_volume_id': 'homelab-pso-db_0000000020', 'storage_id': '12345'}, {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000021', 'name': 'host135Nonehomelab-pso-db_0000000021', 'native_storage_host_id': 'host135', 'native_volume_id': 'homelab-pso-db_0000000021', 'storage_id': '12345'}, {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000022', 'name': 'host137Nonehomelab-pso-db_0000000022', 'native_storage_host_id': 'host137', 'native_volume_id': 'homelab-pso-db_0000000022', 'storage_id': '12345'}, {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000019', 'name': 'host135Nonehomelab-pso-db_0000000019', 'native_storage_host_id': 'host135', 'native_volume_id': 'homelab-pso-db_0000000019', 'storage_id': '12345'}, {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000026', 'name': 'host137Nonehomelab-pso-db_0000000026', 'native_storage_host_id': 'host137', 'native_volume_id': 'homelab-pso-db_0000000026', 'storage_id': '12345'}, {'native_masking_view_id': 'host135Nonehomelab-pso-db_0000000028', 'name': 'host135Nonehomelab-pso-db_0000000028', 'native_storage_host_id': 'host135', 'native_volume_id': 'homelab-pso-db_0000000028', 'storage_id': '12345'}, {'native_masking_view_id': 'host137Nonehomelab-pso-db_0000000024', 'name': 'host137Nonehomelab-pso-db_0000000024', 'native_storage_host_id': 'host137', 'native_volume_id': 'homelab-pso-db_0000000024', 'storage_id': '12345'}, {'native_masking_view_id': 'hsesxiNonehsboot', 'name': 'hsesxiNonehsboot', 'native_storage_host_id': 'hsesxi', 'native_volume_id': 'hsboot', 'storage_id': '12345'}, {'native_masking_view_id': 'hsesxiNonehszdata', 'name': 'hsesxiNonehszdata', 'native_storage_host_id': 'hsesxi', 'native_volume_id': 'hszdata', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun16', 'name': 'zty-doradoV6Nonezty_lun16', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun16', 'storage_id': '12345'}, {'native_masking_view_id': 
'zty-doradoV6Nonezty_lun15', 'name': 'zty-doradoV6Nonezty_lun15', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun15', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun13', 'name': 'zty-doradoV6Nonezty_lun13', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun13', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun11', 'name': 'zty-doradoV6Nonezty_lun11', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun11', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun14', 'name': 'zty-doradoV6Nonezty_lun14', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun14', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun2', 'name': 'zty-doradoV6Nonezty_lun2', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun2', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun5', 'name': 'zty-doradoV6Nonezty_lun5', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun5', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun4', 'name': 'zty-doradoV6Nonezty_lun4', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun4', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun1', 'name': 'zty-doradoV6Nonezty_lun1', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun1', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun3', 'name': 'zty-doradoV6Nonezty_lun3', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun3', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun6', 'name': 'zty-doradoV6Nonezty_lun6', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun6', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun12', 'name': 'zty-doradoV6Nonezty_lun12', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun12', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun10', 'name': 'zty-doradoV6Nonezty_lun10', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun10', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun8', 'name': 'zty-doradoV6Nonezty_lun8', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun8', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun7', 'name': 'zty-doradoV6Nonezty_lun7', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun7', 'storage_id': '12345'}, {'native_masking_view_id': 'zty-doradoV6Nonezty_lun9', 'name': 'zty-doradoV6Nonezty_lun9', 'native_storage_host_id': 'zty-doradoV6', 'native_volume_id': 'zty_lun9', 'storage_id': '12345'}, { 'native_masking_view_id': 'CL-B06-RH2288HV3-8-44-157-33Nonehomelab-pso-db_0000000001-u', 'name': 'CL-B06-RH2288HV3-8-44-157-33Nonehomelab-pso-db_0000000001-u', 'native_storage_host_id': 'CL-B06-RH2288HV3-8-44-157-33', 'native_volume_id': 'homelab-pso-db_0000000001-u', 'storage_id': '12345'}, { 'native_masking_view_id': 'CL-B06-RH2288HV3-8-44-157-33NoneVolume-Group/voltest001', 'name': 'CL-B06-RH2288HV3-8-44-157-33NoneVolume-Group/voltest001', 'native_storage_host_id': 'CL-B06-RH2288HV3-8-44-157-33', 'native_volume_id': 'Volume-Group/voltest001', 'storage_id': '12345'}, {'native_masking_view_id': 'zhilong-host0000002130Nonehswin4102', 'name': 'zhilong-host0000002130Nonehswin4102', 'native_storage_host_id': 
'zhilong-host0000002130', 'native_volume_id': 'hswin4102', 'storage_id': '12345'}, {'native_masking_view_id': 'host135Nonetangxuan/tt001', 'name': 'host135Nonetangxuan/tt001', 'native_storage_host_id': 'host135', 'native_volume_id': 'tangxuan/tt001', 'storage_id': '12345'}, {'native_masking_view_id': 'CL-Test1Nonehswin', 'name': 'CL-Test1Nonehswin', 'native_storage_host_id': 'CL-Test1', 'native_volume_id': 'hswin', 'storage_id': '12345'}, { 'native_masking_view_id': 'zhilong-host0000002130Nonehomelab-pso-db_0000000000-u', 'name': 'zhilong-host0000002130Nonehomelab-pso-db_0000000000-u', 'native_storage_host_id': 'zhilong-host0000002130', 'native_volume_id': 'homelab-pso-db_0000000000-u', 'storage_id': '12345'}, {'native_masking_view_id': 'hosttestNonenc::136_connect', 'name': 'hosttestNonenc::136_connect', 'native_storage_host_id': 'hosttest', 'native_volume_id': 'nc::136_connect', 'storage_id': '12345'}] storage_resource_metrics = { constants.ResourceType.STORAGE: consts.STORAGE_CAP, } volume_resource_metrics = { constants.ResourceType.VOLUME: consts.VOLUME_CAP } drive_metrics = [ { "writes_per_sec": 0, "output_per_sec": 0, "usec_per_write_op": 0, "local_queue_usec_per_op": 0, "time": "2022-04-25T02:24:46Z", "reads_per_sec": 0, "input_per_sec": 0, "usec_per_read_op": 0, "queue_depth": 0 }, { "writes_per_sec": 1856, "output_per_sec": 0, "usec_per_write_op": 653021.569741, "local_queue_usec_per_op": 43158, "time": "2022-04-25T02:25:46Z", "reads_per_sec": 0, "input_per_sec": 0, "usec_per_read_op": 5360, "queue_depth": 0 }] volume_metrics_info = [{ "writes_per_sec": 1864, "name": "136_connect", "usec_per_write_op": 46200000, "output_per_sec": 0, "reads_per_sec": 0, "input_per_sec": 5620302, "time": "2022-04-12T02:12:16Z", "usec_per_read_op": 0 }, { "writes_per_sec": 1864, "name": "136_connect", "usec_per_write_op": 46200000, "output_per_sec": 0, "reads_per_sec": 0, "input_per_sec": 5620302, "time": "2022-04-12T02:13:16Z", "usec_per_read_op": 0 }] def create_driver(): RestHandler.login = mock.Mock( return_value={None}) return PureFlashArrayDriver(**ACCESS_INFO) class test_PureFlashArrayDriver(TestCase): driver = create_driver() def test_init(self): RestHandler.login = mock.Mock( return_value={""}) PureFlashArrayDriver(**ACCESS_INFO) def test_list_volumes(self): RestHandler.get_volumes = mock.Mock( side_effect=[volumes_info]) volume = self.driver.list_volumes(context) self.assertEqual(volume, volume_data) def test_get_storage(self): RestHandler.rest_call = mock.Mock( side_effect=[storage_info, hardware_info, drive_info, storage_id_info, controllers_info]) storage_object = self.driver.get_storage(context) self.assertEqual(storage_object, storage_data) def test_list_alerts(self): RestHandler.rest_call = mock.Mock( side_effect=[alerts_info]) list_alerts = self.driver.list_alerts(context) self.assertEqual(list_alerts, list_alert_data) def test_parse_alert(self): parse_alert = self.driver.parse_alert(context, parse_alert_info) parse_alert_data['occur_time'] = parse_alert.get('occur_time') self.assertDictEqual(parse_alert, parse_alert_data) def test_list_controllers(self): RestHandler.rest_call = mock.Mock( side_effect=[controllers_info, hardware_info]) list_controllers = self.driver.list_controllers(context) self.assertListEqual(list_controllers, controllers_data) def test_list_disks(self): RestHandler.rest_call = mock.Mock( side_effect=[hardware_info, drive_info]) list_disks = self.driver.list_disks(context) self.assertListEqual(list_disks, disk_data) def test_list_ports(self): RestHandler.rest_call 
= mock.Mock( side_effect=[port_network_info, port_info, hardware_info]) list_ports = self.driver.list_ports(context) self.assertListEqual(list_ports, port_data) def test_list_storage_pools(self): list_storage_pools = self.driver.list_storage_pools(context) self.assertEqual(list_storage_pools, []) def test_reset_connection(self): RestHandler.logout = mock.Mock(side_effect=None) RestHandler.login = mock.Mock(side_effect=None) username = None try: self.driver.reset_connection(context) except Exception as e: LOG.error("test_reset_connection error: %s", six.text_type(e)) username = reset_connection_info.get('username') self.assertEqual(username, None) def test_list_storage_host_initiators(self): RestHandler.rest_call = mock.Mock( side_effect=[hosts_info]) hosts = self.driver.list_storage_host_initiators(context) self.assertEqual(hosts, initiator_data) def test_list_storage_hosts(self): RestHandler.rest_call = mock.Mock( side_effect=[HOSTS_PERSONALITY_INFO]) hosts = self.driver.list_storage_hosts(context) self.assertListEqual(hosts, host_data) def test_list_storage_host_groups(self): RestHandler.rest_call = mock.Mock( side_effect=[HGROUP_INFO]) hgroup = self.driver.list_storage_host_groups(context) self.assertDictEqual(hgroup, host_group_data) def test_list_volume_groups(self): RestHandler.rest_call = mock.Mock( side_effect=[VOLUME_GROUP_INFO]) v_group = self.driver.list_volume_groups(context) self.assertDictEqual(v_group, volume_group_data) def test_list_masking_views(self): RestHandler.rest_call = mock.Mock( side_effect=[HGROUP_CONNECT_INFO, HOSTS_CONNECT_INFO]) views = self.driver.list_masking_views(context) self.assertListEqual(views, views_data) # def test_collect_perf_metrics(self): # RestHandler.rest_call = mock.Mock( # side_effect=[storage_id_info, drive_metrics]) # localtime = time.mktime(time.localtime()) * units.k # storage_id = 12345 # start_time = localtime - 1000 * 60 * 60 * 24 * 364 # end_time = localtime # metrics = self.driver.collect_perf_metrics( # context, storage_id, storage_resource_metrics, start_time, # end_time) # storage_metrics = [ # constants.metric_struct( # name='iops', # labels={ # 'storage_id': 12345, # 'resource_type': 'storage', # 'resource_id': 'dlmkk15xcfdf4v5', # 'resource_name': 'pure01', # 'type': 'RAW', # 'unit': 'IOPS'}, # values={1650853440000: 0, 1650853500000: 1856} # ), # constants.metric_struct( # name='readIops', # labels={ # 'storage_id': 12345, # 'resource_type': 'storage', # 'resource_id': 'dlmkk15xcfdf4v5', # 'resource_name': 'pure01', # 'type': 'RAW', # 'unit': 'IOPS'}, # values={1650853440000: 0, 1650853500000: 0} # ), # constants.metric_struct( # name='writeIops', # labels={ # 'storage_id': 12345, # 'resource_type': 'storage', # 'resource_id': 'dlmkk15xcfdf4v5', # 'resource_name': 'pure01', # 'type': 'RAW', # 'unit': 'IOPS'}, # values={1650853440000: 0, 1650853500000: 1856} # ), # constants.metric_struct( # name='throughput', # labels={ # 'storage_id': 12345, # 'resource_type': 'storage', # 'resource_id': 'dlmkk15xcfdf4v5', # 'resource_name': 'pure01', # 'type': 'RAW', # 'unit': 'MB/s'}, # values={1650853440000: 0.0, 1650853500000: 0.0} # ), # constants.metric_struct( # name='readThroughput', # labels={ # 'storage_id': 12345, # 'resource_type': 'storage', # 'resource_id': 'dlmkk15xcfdf4v5', # 'resource_name': 'pure01', # 'type': 'RAW', # 'unit': 'MB/s'}, # values={1650853440000: 0.0, 1650853500000: 0.0} # ), # constants.metric_struct( # name='writeThroughput', # labels={ # 'storage_id': 12345, # 'resource_type': 'storage', # 'resource_id': 
'dlmkk15xcfdf4v5', # 'resource_name': 'pure01', # 'type': 'RAW', # 'unit': 'MB/s'}, # values={1650853440000: 0.0, 1650853500000: 0.0} # ), # constants.metric_struct( # name='readResponseTime', # labels={ # 'storage_id': 12345, # 'resource_type': 'storage', # 'resource_id': 'dlmkk15xcfdf4v5', # 'resource_name': 'pure01', # 'type': 'RAW', # 'unit': 'ms'}, # values={1650853440000: 0.0, 1650853500000: 5.36} # ), # constants.metric_struct( # name='writeResponseTime', # labels={ # 'storage_id': 12345, # 'resource_type': 'storage', # 'resource_id': 'dlmkk15xcfdf4v5', # 'resource_name': 'pure01', # 'type': 'RAW', # 'unit': 'ms'}, # values={1650853440000: 0.0, 1650853500000: 653.022} # ) # ] # self.assertListEqual(metrics, storage_metrics) # volume_metrics = [ # constants.metric_struct( # name='iops', # labels={ # 'storage_id': 12345, # 'resource_type': 'volume', # 'resource_id': '136_connect', # 'resource_name': '136_connect', # 'type': 'RAW', # 'unit': 'IOPS'}, # values={1649729520000: 1864, 1649729580000: 1864} # ), # constants.metric_struct( # name='readIops', # labels={ # 'storage_id': 12345, # 'resource_type': 'volume', # 'resource_id': '136_connect', # 'resource_name': '136_connect', # 'type': 'RAW', # 'unit': 'IOPS'}, # values={1649729520000: 0, 1649729580000: 0} # ), # constants.metric_struct( # name='writeIops', # labels={ # 'storage_id': 12345, # 'resource_type': 'volume', # 'resource_id': '136_connect', # 'resource_name': '136_connect', # 'type': 'RAW', # 'unit': 'IOPS'}, # values={1649729520000: 1864, 1649729580000: 1864} # ), # constants.metric_struct( # name='throughput', # labels={ # 'storage_id': 12345, # 'resource_type': 'volume', # 'resource_id': '136_connect', # 'resource_name': '136_connect', # 'type': 'RAW', # 'unit': 'MB/s'}, # values={1649729520000: 5.36, 1649729580000: 5.36} # ), # constants.metric_struct( # name='readThroughput', # labels={ # 'storage_id': 12345, # 'resource_type': 'volume', # 'resource_id': '136_connect', # 'resource_name': '136_connect', # 'type': 'RAW', # 'unit': 'MB/s'}, # values={1649729520000: 0.0, 1649729580000: 0.0} # ), # constants.metric_struct( # name='writeThroughput', # labels={ # 'storage_id': 12345, # 'resource_type': 'volume', # 'resource_id': '136_connect', # 'resource_name': '136_connect', # 'type': 'RAW', # 'unit': 'MB/s'}, # values={1649729520000: 5.36, 1649729580000: 5.36} # ), # constants.metric_struct( # name='readResponseTime', # labels={ # 'storage_id': 12345, # 'resource_type': 'volume', # 'resource_id': '136_connect', # 'resource_name': '136_connect', # 'type': 'RAW', # 'unit': 'ms'}, # values={1649729520000: 0.0, 1649729580000: 0.0} # ), # constants.metric_struct( # name='writeResponseTime', # labels={ # 'storage_id': 12345, # 'resource_type': 'volume', # 'resource_id': '136_connect', # 'resource_name': '136_connect', # 'type': 'RAW', # 'unit': 'ms'}, # values={1649729520000: 46200.0, 1649729580000: 46200.0} # ) # ] # RestHandler.rest_call = mock.Mock( # side_effect=[volume_metrics_info]) # metrics = self.driver.collect_perf_metrics( # context, storage_id, volume_resource_metrics, start_time, # end_time) # self.assertListEqual(metrics, volume_metrics) def test_get_capabilities(self): err = None try: self.driver.get_capabilities(context) except Exception as e: err = six.text_type(e) LOG.error("test_get_capabilities error: %s", err) self.assertEqual(err, None) def test_get_latest_perf_timestamp(self): RestHandler.rest_call = mock.Mock( side_effect=[drive_metrics]) timestamp = self.driver.get_latest_perf_timestamp(context) times = 
1650853500000 self.assertEqual(timestamp, times) ================================================ FILE: delfin/tests/unit/drivers/test_api.py ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from unittest import TestCase, mock import sys from delfin import context # from delfin import exception from delfin.common import config, constants # noqa from delfin.drivers.api import API from delfin.drivers.fake_storage import FakeStorageDriver sys.modules['delfin.cryptor'] = mock.Mock() class Request: def __init__(self): self.environ = {'delfin.context': context.RequestContext()} pass ACCESS_INFO = { "storage_id": "12345", "vendor": "fake_storage", "model": "fake_driver", "rest": { "host": "10.0.0.1", "port": "8443", "username": "user", "password": "pass" }, "extra_attributes": { "array_id": "00112233" } } STORAGE = { 'name': 'fake_driver', 'description': 'it is a fake driver.', 'vendor': 'fake_vendor', 'model': 'fake_model', 'status': 'normal', 'serial_number': '2102453JPN12KA000011', 'firmware_version': '1.0.0', 'location': 'HK', 'total_capacity': 1024 * 1024, 'used_capacity': 3126, 'free_capacity': 1045449, } class TestDriverAPI(TestCase): def test_init(self): api = API() self.assertIsNotNone(api.driver_manager) # @mock.patch('delfin.db.storage_get') # @mock.patch('delfin.db.storage_create') # @mock.patch('delfin.db.access_info_create') # @mock.patch('delfin.db.storage_get_all') # def test_discover_storage(self, mock_storage, mock_access_info, # mock_storage_create, mock_get_storage): # # Case: Positive scenario for fake driver discovery # storage = copy.deepcopy(STORAGE) # storage['id'] = '12345' # mock_storage.return_value = None # mock_access_info.return_value = ACCESS_INFO # mock_storage_create.return_value = storage # api = API() # api.discover_storage(context, ACCESS_INFO) # mock_storage.assert_called() # mock_access_info.assert_called_with(context, ACCESS_INFO) # mock_storage_create.assert_called() # mock_get_storage.return_value = None # # Case: Register already existing storage # with self.assertRaises(exception.StorageAlreadyExists) as exc: # mock_storage.return_value = storage # api.discover_storage(context, ACCESS_INFO) # self.assertIn('Storage already exists', str(exc.exception)) # mock_storage.return_value = None # # Case: Storage without serial number # wrong_storage = copy.deepcopy(STORAGE) # wrong_storage.pop('serial_number') # wrong_storage['id'] = '12345' # m = mock.Mock() # with mock.patch.object(FakeStorageDriver, 'get_storage') as m: # with self.assertRaises(exception.InvalidResults) as exc: # m.return_value = wrong_storage # api.discover_storage(context, ACCESS_INFO) # self.assertIn('Serial number should be provided by storage', # str(exc.exception)) # # Case: No Storage found # with self.assertRaises(exception.StorageBackendNotFound) as exc: # m.return_value = None # api.discover_storage(context, ACCESS_INFO) # self.assertIn('Storage backend could not be found', # str(exc.exception)) # # Case: 
Test access info without 'storage_id' for driver # test_access_info = copy.deepcopy(ACCESS_INFO) # test_access_info.pop('storage_id') # s = api.discover_storage(context, ACCESS_INFO) # self.assertDictEqual(s, storage) # # Case: Wrong access info (model) for driver # wrong_access_info = copy.deepcopy(ACCESS_INFO) # wrong_access_info['model'] = 'wrong_model' # with self.assertRaises(exception.StorageDriverNotFound) as exc: # api.discover_storage(context, wrong_access_info) # msg = "Storage driver 'fake_storage wrong_model'could not be found" # self.assertIn(msg, str(exc.exception)) # @mock.patch.object(FakeStorageDriver, 'get_storage') # @mock.patch('delfin.db.storage_update') # @mock.patch('delfin.db.access_info_update') # @mock.patch('delfin.db.storage_get') # def test_update_access_info(self, mock_storage_get, # mock_access_info_update, # mock_storage_update, # mock_storage): # storage = copy.deepcopy(STORAGE) # access_info_new = copy.deepcopy(ACCESS_INFO) # access_info_new['rest']['username'] = 'new_user' # mock_storage_get.return_value = storage # mock_access_info_update.return_value = access_info_new # mock_storage_update.return_value = None # mock_storage.return_value = storage # api = API() # updated = api.update_access_info(context, access_info_new) # storage_id = '12345' # mock_storage_get.assert_called_with( # context, storage_id) # mock_access_info_update.assert_called_with( # context, storage_id, access_info_new) # mock_storage_update.assert_called_with( # context, storage_id, storage) # access_info_new['rest']['password'] = "cGFzc3dvcmQ=" # self.assertDictEqual(access_info_new, updated) # # Case: Wrong storage serial number # wrong_storage = copy.deepcopy(STORAGE) # wrong_storage['serial_number'] = '00000' # mock_storage.return_value = wrong_storage # with self.assertRaises(exception.StorageSerialNumberMismatch) as exc: # api.update_access_info(context, access_info_new) # msg = "Serial number 00000 does not match " \ # "the existing storage serial number" # self.assertIn(msg, str(exc.exception)) # # Case: No storage serial number # wrong_storage.pop('serial_number') # mock_storage.return_value = wrong_storage # with self.assertRaises(exception.InvalidResults) as exc: # api.update_access_info(context, access_info_new) # msg = "Serial number should be provided by storage" # self.assertIn(msg, str(exc.exception)) # # Case: No storage # mock_storage.return_value = None # with self.assertRaises(exception.StorageBackendNotFound) as exc: # api.update_access_info(context, access_info_new) # msg = "Storage backend could not be found" # self.assertIn(msg, str(exc.exception)) @mock.patch('delfin.drivers.manager.DriverManager.get_driver') @mock.patch('delfin.db.storage_get') @mock.patch('delfin.db.storage_create') @mock.patch('delfin.db.access_info_create') @mock.patch('delfin.db.storage_get_all') def test_remove_storage(self, mock_storage, mock_access_info, mock_storage_create, mock_get_storage, mock_dm): storage = copy.deepcopy(STORAGE) storage['id'] = '12345' mock_storage.return_value = None mock_access_info.return_value = ACCESS_INFO mock_storage_create.return_value = storage api = API() api.discover_storage(context, ACCESS_INFO) mock_get_storage.return_value = None mock_dm.return_value = FakeStorageDriver() storage_id = '12345' # Verify that driver instance not added to factory driver = api.driver_manager.driver_factory.get(storage_id, None) self.assertIsNone(driver) api.remove_storage(context, storage_id) driver = api.driver_manager.driver_factory.get(storage_id, None) 
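        # After remove_storage(), any driver instance cached for this
        # storage_id should have been evicted, so the repeated factory
        # lookup below must still return None.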
self.assertIsNone(driver) @mock.patch.object(FakeStorageDriver, 'get_storage') @mock.patch('delfin.drivers.manager.DriverManager.get_driver') def test_get_storage(self, driver_manager, mock_fake): driver_manager.return_value = FakeStorageDriver() storage = copy.deepcopy(STORAGE) storage['id'] = '12345' mock_fake.return_value = storage api = API() storage_id = '12345' api.get_storage(context, storage_id) driver_manager.assert_called_once() mock_fake.assert_called() @mock.patch.object(FakeStorageDriver, 'list_storage_pools') @mock.patch('delfin.drivers.manager.DriverManager.get_driver') def test_list_storage_pools(self, driver_manager, mock_fake): driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() storage_id = '12345' api.list_storage_pools(context, storage_id) driver_manager.assert_called_once() mock_fake.assert_called_once() @mock.patch.object(FakeStorageDriver, 'list_volumes') @mock.patch('delfin.drivers.manager.DriverManager.get_driver') def test_list_volumes(self, driver_manager, mock_fake): driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() storage_id = '12345' api.list_volumes(context, storage_id) driver_manager.assert_called_once() mock_fake.assert_called_once() @mock.patch.object(FakeStorageDriver, 'list_controllers') @mock.patch('delfin.drivers.manager.DriverManager.get_driver') def test_list_controllers(self, driver_manager, mock_fake): driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() storage_id = '12345' api.list_controllers(context, storage_id) driver_manager.assert_called_once() mock_fake.assert_called_once() @mock.patch.object(FakeStorageDriver, 'list_disks') @mock.patch('delfin.drivers.manager.DriverManager.get_driver') def test_list_disks(self, driver_manager, mock_fake): driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() storage_id = '12345' api.list_disks(context, storage_id) driver_manager.assert_called_once() mock_fake.assert_called_once() @mock.patch.object(FakeStorageDriver, 'list_ports') @mock.patch('delfin.drivers.manager.DriverManager.get_driver') def test_list_ports(self, driver_manager, mock_fake): driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() storage_id = '12345' api.list_ports(context, storage_id) driver_manager.assert_called_once() mock_fake.assert_called_once() @mock.patch.object(FakeStorageDriver, 'list_filesystems') @mock.patch('delfin.drivers.manager.DriverManager.get_driver') def test_list_filesystems(self, driver_manager, mock_fake): driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() storage_id = '12345' api.list_filesystems(context, storage_id) driver_manager.assert_called_once() mock_fake.assert_called_once() @mock.patch.object(FakeStorageDriver, 'list_qtrees') @mock.patch('delfin.drivers.manager.DriverManager.get_driver') def test_list_qtrees(self, driver_manager, mock_fake): driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() storage_id = '12345' api.list_qtrees(context, storage_id) driver_manager.assert_called_once() mock_fake.assert_called_once() @mock.patch.object(FakeStorageDriver, 'list_shares') @mock.patch('delfin.drivers.manager.DriverManager.get_driver') def test_list_shares(self, driver_manager, mock_fake): driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() storage_id = '12345' api.list_shares(context, storage_id) 
        driver_manager.assert_called_once()
        mock_fake.assert_called_once()

    @mock.patch.object(FakeStorageDriver, 'parse_alert')
    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
    @mock.patch('delfin.db.access_info_get')
    def test_parse_alert(self, mock_access_info, driver_manager, mock_fake):
        mock_access_info.return_value = ACCESS_INFO
        driver_manager.return_value = FakeStorageDriver()
        mock_fake.return_value = []
        api = API()
        storage_id = '12345'
        api.parse_alert(context, storage_id, 'alert')
        mock_access_info.assert_called_once()
        driver_manager.assert_called_once()
        mock_fake.assert_called_once()

    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
    def test_get_capabilities(self, driver_manager):
        driver_manager.return_value = FakeStorageDriver()
        storage_id = '12345'
        capabilities = API().get_capabilities(context, storage_id)
        self.assertTrue('resource_metrics' in capabilities)
        driver_manager.assert_called_once()

    @mock.patch.object(FakeStorageDriver, 'list_storage_host_initiators')
    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
    def test_list_storage_host_initiators(self, driver_manager, mock_fake):
        driver_manager.return_value = FakeStorageDriver()
        mock_fake.return_value = []
        api = API()
        storage_id = '12345'
        api.list_storage_host_initiators(context, storage_id)
        driver_manager.assert_called_once()
        mock_fake.assert_called_once()

    @mock.patch.object(FakeStorageDriver, 'list_storage_hosts')
    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
    def test_list_storage_hosts(self, driver_manager, mock_fake):
        driver_manager.return_value = FakeStorageDriver()
        mock_fake.return_value = []
        api = API()
        storage_id = '12345'
        api.list_storage_hosts(context, storage_id)
        driver_manager.assert_called_once()
        mock_fake.assert_called_once()

    @mock.patch.object(FakeStorageDriver, 'list_storage_host_groups')
    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
    def test_list_storage_host_groups(self, driver_manager, mock_fake):
        driver_manager.return_value = FakeStorageDriver()
        mock_fake.return_value = []
        api = API()
        storage_id = '12345'
        api.list_storage_host_groups(context, storage_id)
        driver_manager.assert_called_once()
        mock_fake.assert_called_once()

    @mock.patch.object(FakeStorageDriver, 'list_port_groups')
    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
    def test_list_port_groups(self, driver_manager, mock_fake):
        driver_manager.return_value = FakeStorageDriver()
        mock_fake.return_value = []
        api = API()
        storage_id = '12345'
        api.list_port_groups(context, storage_id)
        driver_manager.assert_called_once()
        mock_fake.assert_called_once()

    @mock.patch.object(FakeStorageDriver, 'list_volume_groups')
    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
    def test_list_volume_groups(self, driver_manager, mock_fake):
        driver_manager.return_value = FakeStorageDriver()
        mock_fake.return_value = []
        api = API()
        storage_id = '12345'
        api.list_volume_groups(context, storage_id)
        driver_manager.assert_called_once()
        mock_fake.assert_called_once()

    @mock.patch.object(FakeStorageDriver, 'list_masking_views')
    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
    def test_list_masking_views(self, driver_manager, mock_fake):
        driver_manager.return_value = FakeStorageDriver()
        mock_fake.return_value = []
        api = API()
        storage_id = '12345'
        api.list_masking_views(context, storage_id)
        driver_manager.assert_called_once()
        mock_fake.assert_called_once()

    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
    def test_collect_perf_metrics(self, driver_manager):
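        # Both get_capabilities() and collect_perf_metrics() resolve the
        # driver through DriverManager.get_driver, so the mocked lookup is
        # expected to be hit twice (see the call_count assertion below).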
        driver_manager.return_value = FakeStorageDriver()
        storage_id = '12345'
        capabilities = API().get_capabilities(context, storage_id)
        metrics = API().collect_perf_metrics(context, storage_id,
                                             capabilities['resource_metrics'],
                                             1622808000000, 1622808000001)
        self.assertTrue('resource_metrics' in capabilities)
        self.assertTrue(isinstance(metrics[0], constants.metric_struct))
        self.assertEqual(driver_manager.call_count, 2)


================================================
FILE: delfin/tests/unit/drivers/test_manager.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
from unittest import TestCase, mock

from delfin.common import config  # noqa
from delfin.drivers.manager import DriverManager

sys.modules['delfin.cryptor'] = mock.Mock()


class TestDriverManager(TestCase):
    def test_init(self):
        manager = DriverManager()
        self.assertIsNotNone(manager.driver_factory)


================================================
FILE: delfin/tests/unit/exporter/prometheus/__init__.py
================================================


================================================
FILE: delfin/tests/unit/exporter/prometheus/test_prometheus.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import glob
import os
from unittest import TestCase

from delfin.exporter.prometheus import prometheus
from delfin.common.constants import metric_struct

fake_metrics = [metric_struct(name='throughput',
                              labels={'storage_id': '12345',
                                      'resource_type': 'storage',
                                      'resource_id': 'storage0',
                                      'type': 'RAW',
                                      'unit': 'MB/s'},
                              values={1622808000000: 61.9388895680357})]


class TestPrometheusExporter(TestCase):

    def test_push_to_prometheus(self):
        prometheus_obj = prometheus.PrometheusExporter()
        prometheus_obj.metrics_dir = os.getcwd()
        prometheus_obj.push_to_prometheus(fake_metrics)
        # push_to_prometheus should leave at least one *.prom metrics
        # file in metrics_dir.
        self.assertTrue(glob.glob(prometheus_obj.metrics_dir + '/' + '*.prom'))


================================================
FILE: delfin/tests/unit/fake_data.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from delfin.db.sqlalchemy import models


def fake_storage_pool_create():
    fake_storage_pools = [models.StoragePool(), models.StoragePool()]

    fake_storage_pools[0] = {
        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
        'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
        'name': 'SRP_1',
        'status': 'normal',
        'created_at': '2020-06-10T07:17:08.707356',
        'updated_at': '2020-06-10T07:17:08.707356',
        'native_storage_pool_id': 'SRP_1',
        'storage_type': 'block',
        'total_capacity': 26300318136401,
        'used_capacity': 19054536509358,
        'free_capacity': 7245781627043,
        'subscribed_capacity': 219902325555200,
        'description': 'fake storage Pool',
    }
    fake_storage_pools[1] = {
        'id': '95f7b7ed-bd7f-426e-b05f-f1ffeb4f09df',
        'storage_id': '84d0c5f7-2349-401c-8672-f76214d13cab',
        'name': 'SRP_2',
        'status': 'normal',
        'created_at': '2020-06-10T07:17:08.707356',
        'updated_at': '2020-06-10T07:17:08.707356',
        'native_storage_pool_id': 'SRP_2',
        'extra': 'extra attrib',  # invalid key
        'storage_type': 'block',
        'total_capacity': 26300318136401,
        'used_capacity': 19054536509358,
        'free_capacity': 7245781627043,
        'subscribed_capacity': 219902325555200,
        'description': 'fake storage Pool',
    }
    return fake_storage_pools


def fake_expected_storage_pool_create():
    expected = [
        {
            'created_at': '2020-06-10T07:17:08.707356',
            'updated_at': '2020-06-10T07:17:08.707356',
            'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
            'name': 'SRP_1',
            'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
            'native_storage_pool_id': 'SRP_1',
            'description': 'fake storage Pool',
            'status': 'normal',
            'storage_type': 'block',
            'total_capacity': 26300318136401,
            'used_capacity': 19054536509358,
            'free_capacity': 7245781627043,
            'subscribed_capacity': 219902325555200,
        },
        {
            'created_at': '2020-06-10T07:17:08.707359',
            'updated_at': '2020-06-10T07:17:08.707356',
            'id': '95f7b7ed-bd7f-426e-b05f-f1ffeb4f09df',
            'name': 'SRP_2',
            'storage_id': '84d0c5f7-2349-401c-8672-f76214d13cab',
            'native_storage_pool_id': 'SRP_2',
            'description': 'fake storage Pool',
            'status': 'normal',
            'storage_type': 'block',
            'total_capacity': 26300318136401,
            'used_capacity': 19054536509358,
            'free_capacity': 7245781627043,
            'subscribed_capacity': 219902325555200,
        },
    ]
    return expected


def fake_storage_host_initiator_create():
    fake_storage_host_initiators = [models.StorageHostInitiator()]
    fake_storage_host_initiators[0] = {
        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
        'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
        'name': 'storage_host_initiator_1',
        'description': 'storage_host_initiator_1',
        'alias': 'storage_host_initiator_1',
        'status': 'normal',
        'created_at': '2020-06-10T07:17:08.707356',
        'updated_at': '2020-06-10T07:17:08.707356',
        'native_storage_host_initiator_id': 'storage_host_initiator_1',
        'native_storage_host_id': 'storage_host_1',
        'wwn': 'wwn1',
        'type': 'fc',
    }
    return fake_storage_host_initiators


def fake_expected_storage_host_initiator_create():
    expected = [
        {
            'created_at': '2020-06-10T07:17:08.707356',
            'updated_at': '2020-06-10T07:17:08.707356',
            'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
            'name': 'storage_host_initiator_1',
            'description': 'storage_host_initiator_1',
            'alias': 'storage_host_initiator_1',
            'status': 'normal',
            'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
            'native_storage_host_initiator_id': 'storage_host_initiator_1',
            'native_storage_host_id': 'storage_host_1',
            'wwn': 'wwn1',
            'type': 'fc',
        },
    ]
    return expected


def fake_storage_host_create():
    fake_storage_hosts = [models.StorageHost()]
    fake_storage_hosts[0] = {
        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
        'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
        'name': 'storage_host_1',
        'description': 'storage_host_1',
        'ip_address': '1.2.3.4',
        'status': 'normal',
        'os_type': 'linux',
        'created_at': '2020-06-10T07:17:08.707356',
        'updated_at': '2020-06-10T07:17:08.707356',
        'native_storage_host_id': 'storage_host_1',
    }
    return fake_storage_hosts


def fake_expected_storage_host_create():
    expected = [
        {
            'created_at': '2020-06-10T07:17:08.707356',
            'updated_at': '2020-06-10T07:17:08.707356',
            'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
            'name': 'storage_host_1',
            'description': 'storage_host_1',
            'ip_address': '1.2.3.4',
            'os_type': 'linux',
            'status': 'normal',
            'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
            'native_storage_host_id': 'storage_host_1',
        },
    ]
    return expected


def fake_storage_host_group_create():
    fake_storage_host_groups = [models.StorageHostGroup()]
    fake_storage_host_groups[0] = {
        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
        'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
        'name': 'storage_host_group_1',
        'description': 'storage_host_group_1',
        'created_at': '2020-06-10T07:17:08.707356',
        'updated_at': '2020-06-10T07:17:08.707356',
        'native_storage_host_group_id': 'storage_host_group_1',
    }
    return fake_storage_host_groups


def fake_expected_storage_host_group_create():
    expected = [
        {
            'created_at': '2020-06-10T07:17:08.707356',
            'updated_at': '2020-06-10T07:17:08.707356',
            'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
            'name': 'storage_host_group_1',
            'description': 'storage_host_group_1',
            'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
            'native_storage_host_group_id': 'storage_host_group_1',
        },
    ]
    return expected


def fake_port_group_create():
    fake_port_groups = [models.PortGroup()]
    fake_port_groups[0] = {
        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
        'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
        'name': 'port_group_1',
        'description': 'port_group_1',
        'created_at': '2020-06-10T07:17:08.707356',
        'updated_at': '2020-06-10T07:17:08.707356',
        'native_port_group_id': 'port_group_1',
    }
    return fake_port_groups


def fake_expected_port_group_create():
    expected = [
        {
            'created_at': '2020-06-10T07:17:08.707356',
            'updated_at': '2020-06-10T07:17:08.707356',
            'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
            'name': 'port_group_1',
            'description': 'port_group_1',
            'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
            'native_port_group_id': 'port_group_1',
        },
    ]
    return expected


def fake_volume_group_create():
    fake_volume_groups = [models.VolumeGroup()]
    fake_volume_groups[0] = {
        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
        'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
        'name': 'volume_group_1',
        'description': 'volume_group_1',
        'created_at': '2020-06-10T07:17:08.707356',
        'updated_at': '2020-06-10T07:17:08.707356',
        'native_volume_group_id': 'volume_group_1',
    }
    return fake_volume_groups


def fake_expected_volume_groups_create():
    expected = [
        {
            'created_at': '2020-06-10T07:17:08.707356',
            'updated_at': '2020-06-10T07:17:08.707356',
            'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
            'name': 'volume_group_1',
            'description': 'volume_group_1',
            'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
            'native_volume_group_id': 'volume_group_1',
        },
    ]
    return expected


def fake_masking_view_create():
    fake_masking_views = [models.MaskingView()]
    fake_masking_views[0] = {
        'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
        'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
        'name': 'masking_view_1',
        'description': 'masking_view_1',
        'created_at': '2020-06-10T07:17:08.707356',
        'updated_at': '2020-06-10T07:17:08.707356',
        'native_storage_host_id': 'storage_host_1',
        'native_volume_id': 'volume_1',
        'native_masking_view_id': 'masking_view_1',
    }
    return fake_masking_views


def fake_expected_masking_views_create():
    expected = [
        {
            'created_at': '2020-06-10T07:17:08.707356',
            'updated_at': '2020-06-10T07:17:08.707356',
            'id': '14155a1f-f053-4ccb-a846-ed67e4387428',
            'name': 'masking_view_1',
            'description': 'masking_view_1',
            'storage_id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
            'native_storage_host_id': 'storage_host_1',
            'native_volume_id': 'volume_1',
            'native_storage_host_group_id': None,
            'native_port_group_id': None,
            'native_volume_group_id': None,
            'native_masking_view_id': 'masking_view_1',
        },
    ]
    return expected


================================================
FILE: delfin/tests/unit/fake_notifier.py
================================================
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import functools

import oslo_messaging as messaging
from oslo_serialization import jsonutils

from delfin import rpc

NOTIFICATIONS = []


def reset():
    del NOTIFICATIONS[:]


FakeMessage = collections.namedtuple(
    'Message', ['publisher_id', 'priority', 'event_type', 'payload'],
)


class FakeNotifier(object):

    def __init__(self, transport, publisher_id=None, serializer=None):
        self.transport = transport
        self.publisher_id = publisher_id
        for priority in ['debug', 'info', 'warn', 'error', 'critical']:
            setattr(self, priority,
                    functools.partial(self._notify, priority.upper()))
        self._serializer = serializer or messaging.serializer.NoOpSerializer()

    def prepare(self, publisher_id=None):
        if publisher_id is None:
            publisher_id = self.publisher_id
        return self.__class__(self.transport, publisher_id, self._serializer)

    def _notify(self, priority, ctxt, event_type, payload):
        payload = self._serializer.serialize_entity(ctxt, payload)
        # NOTE(sileht): simulate the kombu serializer; this permits raising
        # an exception if something has not been serialized correctly
        jsonutils.to_primitive(payload)
        msg = dict(publisher_id=self.publisher_id,
                   priority=priority,
                   event_type=event_type,
                   payload=payload)
        NOTIFICATIONS.append(msg)


def stub_notifier(testcase):
    testcase.mock_object(messaging, 'Notifier', FakeNotifier)
    if rpc.NOTIFIER:
        serializer = getattr(rpc.NOTIFIER, '_serializer', None)
        testcase.mock_object(rpc, 'NOTIFIER',
                             FakeNotifier(rpc.NOTIFIER.transport,
                                          rpc.NOTIFIER.publisher_id,
                                          serializer=serializer))


================================================
FILE: delfin/tests/unit/leader_election/__init__.py
================================================


================================================
FILE: delfin/tests/unit/leader_election/distributor/__init__.py
================================================


================================================
FILE: delfin/tests/unit/leader_election/distributor/test_task_distributor.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
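
# These tests verify that TaskDistributor.distribute_new_job picks an
# executor via consistent hashing, updates the task record, and hands the
# job to the chosen executor over RPC.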

from unittest import mock

from oslo_utils import uuidutils

from delfin import context
from delfin import db
from delfin import test
from delfin.common import constants
from delfin.db.sqlalchemy.models import Task
from delfin.leader_election.distributor.task_distributor \
    import TaskDistributor

fake_telemetry_job = {
    Task.id.name: 2,
    Task.storage_id.name: uuidutils.generate_uuid(),
    Task.args.name: {},
    Task.interval.name: 10,
    Task.method.name: constants.TelemetryCollection.PERFORMANCE_TASK_METHOD,
    Task.last_run_time.name: None,
    Task.deleted.name: 0,
}

fake_telemetry_jobs = [
    fake_telemetry_job,
]


class TestTaskDistributor(test.TestCase):
    @mock.patch('delfin.coordination.ConsistentHashing.get_task_executor')
    @mock.patch('delfin.coordination.ConsistentHashing.start')
    @mock.patch('delfin.task_manager.metrics_rpcapi.TaskAPI.assign_job')
    @mock.patch.object(db, 'task_update')
    @mock.patch('delfin.coordination.ConsistentHashing.__init__',
                mock.Mock(return_value=None))
    def test_distribute_new_job(self, mock_task_update, mock_assign_job,
                                mock_partitioner_start,
                                mock_get_task_executor):
        ctx = context.get_admin_context()
        task_distributor = TaskDistributor(ctx)
        task_distributor.distribute_new_job('fake_task_id')

        self.assertEqual(mock_assign_job.call_count, 1)
        self.assertEqual(mock_task_update.call_count, 1)
        self.assertEqual(mock_partitioner_start.call_count, 1)
        self.assertEqual(mock_get_task_executor.call_count, 1)


================================================
FILE: delfin/tests/unit/task_manager/__init__.py
================================================


================================================
FILE: delfin/tests/unit/task_manager/scheduler/__init__.py
================================================


================================================
FILE: delfin/tests/unit/task_manager/scheduler/schedulers/__init__.py
================================================


================================================
FILE: delfin/tests/unit/task_manager/scheduler/schedulers/telemetry/__init__.py
================================================


================================================
FILE: delfin/tests/unit/task_manager/scheduler/schedulers/telemetry/test_failed_performance_collection_handler.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from datetime import datetime
from unittest import mock

from oslo_utils import uuidutils

from delfin import context
from delfin import db
from delfin import exception
from delfin import test
from delfin.common.constants import TelemetryCollection
from delfin.common.constants import TelemetryTaskStatus, TelemetryJobStatus
from delfin.db.sqlalchemy.models import FailedTask
from delfin.db.sqlalchemy.models import Task
from delfin.task_manager.scheduler.schedulers.telemetry. \
    failed_performance_collection_handler import \
    FailedPerformanceCollectionHandler

fake_failed_job_id = 43

fake_failed_job = {
    FailedTask.id.name: fake_failed_job_id,
    FailedTask.retry_count.name: 0,
    FailedTask.result.name: "Init",
    FailedTask.job_id.name: uuidutils.generate_uuid(),
    FailedTask.task_id.name: uuidutils.generate_uuid(),
    FailedTask.method.name:
        FailedPerformanceCollectionHandler.__module__ + '.' +
        FailedPerformanceCollectionHandler.__name__,
    FailedTask.start_time.name: int(datetime.now().timestamp()),
    FailedTask.end_time.name: int(datetime.now().timestamp()) + 20,
    FailedTask.interval.name: 20,
    FailedTask.deleted.name: False,
    FailedTask.executor.name: 'node1',
}

fake_deleted_storage_failed_job = {
    FailedTask.id.name: fake_failed_job_id,
    FailedTask.retry_count.name: 0,
    FailedTask.result.name: "Init",
    FailedTask.job_id.name: uuidutils.generate_uuid(),
    FailedTask.task_id.name: uuidutils.generate_uuid(),
    FailedTask.method.name:
        FailedPerformanceCollectionHandler.__module__ + '.' +
        FailedPerformanceCollectionHandler.__name__,
    FailedTask.start_time.name: int(datetime.now().timestamp()),
    FailedTask.end_time.name: int(datetime.now().timestamp()) + 20,
    FailedTask.interval.name: 20,
    FailedTask.deleted.name: True,
    FailedTask.executor.name: 'node1',
}

fake_telemetry_job = {
    Task.id.name: 2,
    Task.storage_id.name: uuidutils.generate_uuid(),
    Task.args.name: {},
    Task.executor.name: 'node1',
}


def failed_task_not_found_exception(ctx, failed_task_id):
    raise exception.FailedTaskNotFound("Failed Task not found.")


class TestFailedPerformanceCollectionHandler(test.TestCase):
    @mock.patch.object(db, 'task_get',
                       mock.Mock(return_value=fake_telemetry_job))
    @mock.patch.object(db, 'failed_task_get',
                       mock.Mock(return_value=fake_failed_job))
    @mock.patch('delfin.task_manager.metrics_rpcapi.TaskAPI.remove_failed_job')
    @mock.patch('delfin.db.failed_task_update')
    @mock.patch('delfin.task_manager.tasks.telemetry'
                '.PerformanceCollectionTask.collect')
    def test_failed_job_success(self, mock_collect_telemetry,
                                mock_failed_task_update,
                                mock_failed_job):
        mock_collect_telemetry.return_value = TelemetryTaskStatus. \
            TASK_EXEC_STATUS_SUCCESS
        ctx = context.get_admin_context()
        failed_job_handler = FailedPerformanceCollectionHandler.get_instance(
            ctx, fake_failed_job_id)
        # call failed job
        failed_job_handler()

        self.assertEqual(mock_failed_job.call_count, 1)
        mock_failed_task_update.assert_called_once_with(
            ctx, fake_failed_job_id,
            {FailedTask.retry_count.name: 1,
             FailedTask.result.name:
                 TelemetryJobStatus.FAILED_JOB_STATUS_SUCCESS})

    @mock.patch.object(db, 'task_get',
                       mock.Mock(return_value=fake_telemetry_job))
    @mock.patch.object(db, 'failed_task_get',
                       mock.Mock(return_value=fake_failed_job))
    @mock.patch('delfin.task_manager.metrics_rpcapi.TaskAPI.remove_failed_job')
    @mock.patch('delfin.db.failed_task_update')
    @mock.patch('delfin.task_manager.rpcapi.TaskAPI.collect_telemetry')
    def test_failed_job_failure(self, mock_collect_telemetry,
                                mock_failed_task_update,
                                mock_failed_job):
        mock_collect_telemetry.return_value = TelemetryTaskStatus. \
            TASK_EXEC_STATUS_FAILURE
        ctx = context.get_admin_context()
        failed_job_handler = FailedPerformanceCollectionHandler.get_instance(
            ctx, fake_failed_job_id)
        # retry
        # call failed job
        failed_job_handler()

        self.assertEqual(mock_failed_job.call_count, 0)
        mock_failed_task_update.assert_called_once_with(
            ctx, fake_failed_job_id,
            {FailedTask.retry_count.name: 1,
             FailedTask.result.name:
                 TelemetryJobStatus.FAILED_JOB_STATUS_RETRYING})

    @mock.patch.object(db, 'task_get',
                       mock.Mock(return_value=fake_telemetry_job))
    @mock.patch.object(db, 'failed_task_get')
    @mock.patch('delfin.task_manager.metrics_rpcapi.TaskAPI.remove_failed_job')
    @mock.patch('delfin.db.failed_task_update')
    @mock.patch('delfin.task_manager.rpcapi.TaskAPI.collect_telemetry')
    def test_failed_job_fail_max_times(self, mock_collect_telemetry,
                                       mock_failed_task_update,
                                       mock_remove_job,
                                       mock_failed_task_get):
        mock_collect_telemetry.return_value = TelemetryTaskStatus. \
            TASK_EXEC_STATUS_FAILURE
        failed_job = fake_failed_job.copy()
        failed_job[FailedTask.retry_count.name] = \
            TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT - 1
        # return with maximum retry count
        mock_failed_task_get.return_value = failed_job
        ctx = context.get_admin_context()
        failed_job_handler = FailedPerformanceCollectionHandler.get_instance(
            ctx, fake_failed_job_id)
        # call failed job
        failed_job_handler()

        self.assertEqual(mock_remove_job.call_count, 1)
        mock_failed_task_update.assert_called_once_with(
            ctx, fake_failed_job_id,
            {FailedTask.retry_count.name:
                 TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT,
             FailedTask.result.name:
                 TelemetryJobStatus.FAILED_JOB_STATUS_INIT})

    @mock.patch.object(db, 'task_get',
                       mock.Mock(return_value=fake_telemetry_job))
    @mock.patch.object(db, 'failed_task_get',
                       mock.Mock(return_value=fake_deleted_storage_failed_job))
    @mock.patch('delfin.task_manager.metrics_rpcapi.TaskAPI.remove_failed_job')
    @mock.patch('delfin.db.failed_task_update')
    @mock.patch('delfin.task_manager.rpcapi.TaskAPI.collect_telemetry')
    def test_failed_job_deleted_storage(self, mock_collect_telemetry,
                                        mock_failed_task_update,
                                        mock_pause_job):
        ctx = context.get_admin_context()
        failed_job_handler = FailedPerformanceCollectionHandler.get_instance(
            ctx, fake_failed_job_id)
        failed_job_handler()

        # Verify that no action performed for deleted storage failed tasks
        self.assertEqual(mock_collect_telemetry.call_count, 0)
        self.assertEqual(mock_failed_task_update.call_count, 0)

    @mock.patch.object(db, 'task_get',
                       mock.Mock(return_value=fake_telemetry_job))
    @mock.patch.object(db, 'failed_task_get',
                       failed_task_not_found_exception)
    @mock.patch(
        'delfin.task_manager.metrics_rpcapi.TaskAPI.remove_failed_job',
        mock.Mock())
    @mock.patch('delfin.db.failed_task_update')
    @mock.patch('delfin.task_manager.rpcapi.TaskAPI.collect_telemetry')
    def test_deleted_storage_exception(self, mock_collect_telemetry,
                                       mock_failed_task_update):
        ctx = context.get_admin_context()
        failed_job_handler = FailedPerformanceCollectionHandler(
            ctx, 1122, '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6', '', 1234, 2,
            1122334400, 1122334800, 'node1')
        failed_job_handler()

        # Verify that no action performed for deleted storage failed tasks
        self.assertEqual(mock_collect_telemetry.call_count, 0)
        self.assertEqual(mock_failed_task_update.call_count, 0)


================================================
FILE: delfin/tests/unit/task_manager/scheduler/schedulers/telemetry/test_job_handler.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import mock
from datetime import datetime

from oslo_utils import uuidutils

from delfin import context
from delfin import db
from delfin import test
from delfin.common import constants
from delfin.db.sqlalchemy.models import Task
from delfin.task_manager.scheduler.schedulers.telemetry.job_handler import \
    JobHandler
from delfin.task_manager.scheduler.schedulers.telemetry.job_handler import \
    FailedJobHandler
from delfin.db.sqlalchemy.models import FailedTask
from delfin.task_manager.scheduler.schedulers.telemetry. \
    failed_performance_collection_handler import \
    FailedPerformanceCollectionHandler
from delfin.common.constants import TelemetryCollection

fake_executor = 'node1'

fake_telemetry_job = {
    Task.id.name: 2,
    Task.storage_id.name: uuidutils.generate_uuid(),
    Task.args.name: {},
    Task.interval.name: 10,
    Task.job_id.name: None,
    Task.method.name: constants.TelemetryCollection.PERFORMANCE_TASK_METHOD,
    Task.last_run_time.name: None,
    Task.executor.name: fake_executor,
    Task.deleted.name: False,
}

fake_telemetry_jobs = [
    fake_telemetry_job,
]

fake_telemetry_job_deleted = {
    Task.id.name: 2,
    Task.storage_id.name: uuidutils.generate_uuid(),
    Task.args.name: {},
    Task.interval.name: 10,
    Task.method.name: constants.TelemetryCollection.PERFORMANCE_TASK_METHOD,
    Task.last_run_time.name: None,
    Task.deleted.name: True,
    Task.executor.name: fake_executor,
}

fake_telemetry_jobs_deleted = [
    fake_telemetry_job_deleted,
]

# With method name as None
Incorrect_telemetry_job = {
    Task.id.name: 2,
    Task.storage_id.name: uuidutils.generate_uuid(),
    Task.args.name: {},
    Task.interval.name: 10,
    Task.method.name: None,
    Task.last_run_time.name: None,
    Task.executor.name: None,
}

Incorrect_telemetry_jobs = [
    Incorrect_telemetry_job,
]

fake_failed_job = {
    FailedTask.id.name: 43,
    FailedTask.retry_count.name: 0,
    FailedTask.result.name: "Init",
    FailedTask.job_id.name: "fake_job_id",
    FailedTask.task_id.name: uuidutils.generate_uuid(),
    FailedTask.method.name:
        FailedPerformanceCollectionHandler.__module__ + '.' +
        FailedPerformanceCollectionHandler.__name__,
    FailedTask.start_time.name: int(datetime.now().timestamp()),
    FailedTask.end_time.name: int(datetime.now().timestamp()) + 20,
    FailedTask.interval.name: 20,
    FailedTask.deleted.name: False,
    FailedTask.executor.name: fake_executor,
}

fake_failed_jobs = [
    fake_failed_job,
]


class TestTelemetryJob(test.TestCase):
    @mock.patch.object(db, 'task_get_all',
                       mock.Mock(return_value=fake_telemetry_jobs))
    @mock.patch.object(db, 'task_update',
                       mock.Mock(return_value=fake_telemetry_job))
    @mock.patch.object(db, 'task_get',
                       mock.Mock(return_value=fake_telemetry_job))
    @mock.patch(
        'apscheduler.schedulers.background.BackgroundScheduler.add_job')
    def test_telemetry_job_scheduling(self, mock_add_job):
        ctx = context.get_admin_context()
        telemetry_job = JobHandler(ctx,
                                   fake_telemetry_job['id'],
                                   fake_telemetry_job['storage_id'],
                                   fake_telemetry_job['args'],
                                   fake_telemetry_job['interval'])
        # call telemetry job scheduling
        telemetry_job.schedule_job(fake_telemetry_job['id'])
        self.assertEqual(mock_add_job.call_count, 1)

    @mock.patch.object(db, 'task_delete', mock.Mock())
    @mock.patch.object(db, 'task_get_all',
                       mock.Mock(return_value=fake_telemetry_jobs_deleted))
    @mock.patch.object(db, 'task_update',
                       mock.Mock(return_value=fake_telemetry_job))
    @mock.patch.object(db, 'task_get',
                       mock.Mock(return_value=fake_telemetry_job))
    @mock.patch(
        'apscheduler.schedulers.background.BackgroundScheduler.add_job',
        mock.Mock())
    @mock.patch('logging.LoggerAdapter.error')
    def test_telemetry_removal_success(self, mock_log_error):
        ctx = context.get_admin_context()
        telemetry_job = JobHandler(ctx,
                                   fake_telemetry_job['id'],
                                   fake_telemetry_job['storage_id'],
                                   fake_telemetry_job['args'],
                                   fake_telemetry_job['interval'])
        # call telemetry job scheduling
        telemetry_job.remove_job(fake_telemetry_job['id'])
        self.assertEqual(mock_log_error.call_count, 0)


class TestFailedTelemetryJob(test.TestCase):
    @mock.patch.object(db, 'failed_task_get_all',
                       mock.Mock(return_value=fake_failed_jobs))
    @mock.patch.object(db, 'failed_task_update',
                       mock.Mock(return_value=fake_failed_job))
    @mock.patch.object(db, 'task_get',
                       mock.Mock(return_value=fake_telemetry_job))
    @mock.patch.object(db, 'failed_task_get',
                       mock.Mock(return_value=fake_failed_job))
    @mock.patch(
        'apscheduler.schedulers.background.BackgroundScheduler.add_job')
    def test_failed_job_scheduling(self, mock_add_job):
        failed_job = FailedJobHandler(context.get_admin_context())
        # call failed job scheduling
        failed_job.schedule_failed_job(fake_failed_job['id'])
        self.assertEqual(mock_add_job.call_count, 1)

    @mock.patch.object(db, 'failed_task_get',
                       mock.Mock(return_value=fake_failed_job))
    @mock.patch(
        'apscheduler.schedulers.background.BackgroundScheduler.remove_job')
    @mock.patch(
        'apscheduler.schedulers.background.BackgroundScheduler.get_job')
    @mock.patch.object(db, 'failed_task_delete')
    @mock.patch.object(db, 'failed_task_get_all')
    def test_failed_job_with_max_retry(self, mock_failed_get_all,
                                       mock_failed_task_delete,
                                       mock_get_job, mock_remove_job):
        # configure to return entry with max retry count
        failed_jobs = fake_failed_jobs.copy()
        failed_jobs[0][FailedTask.retry_count.name] = \
            TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT
        mock_failed_get_all.return_value = failed_jobs

        failed_job = FailedJobHandler(context.get_admin_context())
        # call failed job scheduling
        failed_job.schedule_failed_job(failed_jobs[0])
        mock_get_job.return_value = True

        # entry get deleted and job get removed
        self.assertEqual(mock_failed_task_delete.call_count, 1)
        self.assertEqual(mock_remove_job.call_count, 1)
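
    # A failed job that already carries a job_id is treated as scheduled,
    # so no new scheduler job should be added for it.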
    @mock.patch(
        'apscheduler.schedulers.background.BackgroundScheduler.get_job')
    @mock.patch(
        'apscheduler.schedulers.background.BackgroundScheduler.add_job')
    @mock.patch.object(db, 'failed_task_get_all')
    def test_failed_job_with_job_already_scheduled(self, mock_failed_get_all,
                                                   mock_add_job,
                                                   mock_get_job):
        # configure to return entry with job id
        failed_jobs = fake_failed_jobs.copy()
        failed_jobs[0][FailedTask.job_id.name] = uuidutils.generate_uuid()
        mock_failed_get_all.return_value = failed_jobs

        # configure to have job in scheduler
        mock_get_job.return_value = failed_jobs

        failed_job = FailedJobHandler(context.get_admin_context())
        # call failed job scheduling
        failed_job.remove_failed_job(fake_failed_job['id'])

        # the job will not be scheduled
        self.assertEqual(mock_add_job.call_count, 0)

    @mock.patch.object(db, 'failed_task_get',
                       mock.Mock(return_value=fake_failed_job))
    @mock.patch(
        'apscheduler.schedulers.background.BackgroundScheduler.remove_job')
    @mock.patch.object(db, 'failed_task_delete')
    @mock.patch.object(db, 'failed_task_get_all')
    def test_failed_job_scheduling_with_no_task(self, mock_failed_get_all,
                                                mock_failed_task_delete,
                                                mock_remove_job):
        # configure to return entry with max retry count
        failed_jobs = fake_failed_jobs.copy()
        failed_jobs[0][FailedTask.job_id.name] = uuidutils.generate_uuid()
        mock_failed_get_all.return_value = failed_jobs

        failed_job = FailedJobHandler(context.get_admin_context())
        # call failed job scheduling
        failed_job.remove_failed_job(fake_failed_job)

        # entry get deleted and job get removed
        self.assertEqual(mock_failed_task_delete.call_count, 1)
        self.assertEqual(mock_remove_job.call_count, 0)


================================================
FILE: delfin/tests/unit/task_manager/scheduler/schedulers/telemetry/test_performance_collection_handler.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import mock

from oslo_utils import uuidutils

from delfin import context
from delfin import db
from delfin import exception
from delfin import test
from delfin.common import constants
from delfin.common.constants import TelemetryTaskStatus
from delfin.db.sqlalchemy.models import Task
from delfin.task_manager.scheduler.schedulers.telemetry. \
    performance_collection_handler import \
    PerformanceCollectionHandler

fake_task_id = 43
fake_executor = 'node1'
fake_storage_id = '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6'

fake_telemetry_job = {
    Task.id.name: 2,
    Task.storage_id.name: uuidutils.generate_uuid(),
    Task.args.name: {},
    Task.interval.name: 10,
    Task.deleted.name: False,
    Task.method.name: constants.TelemetryCollection.PERFORMANCE_TASK_METHOD,
    Task.executor.name: fake_executor
}

fake_deleted_telemetry_job = {
    Task.id.name: 2,
    Task.storage_id.name: uuidutils.generate_uuid(),
    Task.args.name: {},
    Task.interval.name: 10,
    Task.deleted.name: True,
    Task.method.name: constants.TelemetryCollection.PERFORMANCE_TASK_METHOD,
    Task.executor.name: fake_executor
}


def task_not_found_exception(ctx, task_id):
    raise exception.TaskNotFound("Task not found.")


class TestPerformanceCollectionHandler(test.TestCase):
    @mock.patch.object(db, 'task_get',
                       mock.Mock(return_value=fake_telemetry_job))
    @mock.patch('delfin.db.task_update')
    @mock.patch('delfin.task_manager.tasks.telemetry'
                '.PerformanceCollectionTask.collect')
    def test_performance_collection_success(self, mock_collect_telemetry,
                                            mock_task_update):
        mock_collect_telemetry.return_value = TelemetryTaskStatus. \
            TASK_EXEC_STATUS_SUCCESS
        ctx = context.get_admin_context()
        perf_collection_handler = PerformanceCollectionHandler.get_instance(
            ctx, fake_task_id)
        # call performance collection handler
        perf_collection_handler()

        self.assertEqual(mock_collect_telemetry.call_count, 1)
        self.assertEqual(mock_task_update.call_count, 1)

    @mock.patch('delfin.db.task_update')
    @mock.patch('delfin.task_manager.metrics_rpcapi.TaskAPI.assign_failed_job')
    @mock.patch.object(db, 'task_get',
                       mock.Mock(return_value=fake_telemetry_job))
    @mock.patch('delfin.db.failed_task_create')
    @mock.patch('delfin.task_manager.tasks.telemetry'
                '.PerformanceCollectionTask.collect')
    @mock.patch('delfin.drivers.api.API.get_capabilities')
    def test_performance_collection_failure(self, mock_get_capabilities,
                                            mock_collect_telemetry,
                                            mock_failed_task_create,
                                            mock_assign_failed_job,
                                            mock_task_update):
        mock_get_capabilities.return_value = {}
        mock_collect_telemetry.return_value = TelemetryTaskStatus. \
            TASK_EXEC_STATUS_FAILURE
        ctx = context.get_admin_context()
        perf_collection_handler = PerformanceCollectionHandler.get_instance(
            ctx, fake_task_id)
        # call performance collection handler
        perf_collection_handler()

        # Verify that failed task create is called if collect telemetry fails
        self.assertEqual(mock_failed_task_create.call_count, 1)
        self.assertEqual(mock_assign_failed_job.call_count, 1)
        self.assertEqual(mock_task_update.call_count, 1)

    @mock.patch.object(db, 'task_get',
                       mock.Mock(return_value=fake_deleted_telemetry_job))
    @mock.patch('delfin.db.task_update')
    @mock.patch('delfin.task_manager.tasks.telemetry'
                '.PerformanceCollectionTask.collect')
    def test_performance_collection_deleted_storage(self,
                                                    mock_collect_telemetry,
                                                    mock_task_update):
        mock_collect_telemetry.return_value = TelemetryTaskStatus. \
            TASK_EXEC_STATUS_SUCCESS
        ctx = context.get_admin_context()
        perf_collection_handler = PerformanceCollectionHandler.get_instance(
            ctx, fake_task_id)
        perf_collection_handler()

        # Verify that collect telemetry and db update are not called
        # for deleted storage
        self.assertEqual(mock_collect_telemetry.call_count, 0)
        self.assertEqual(mock_task_update.call_count, 0)

    @mock.patch('delfin.db.task_get', task_not_found_exception)
    @mock.patch('delfin.task_manager.tasks.telemetry'
                '.PerformanceCollectionTask.collect')
    def test_deleted_storage_exception(self, mock_collect_telemetry):
        ctx = context.get_admin_context()
        perf_collection_handler = PerformanceCollectionHandler(
            ctx, fake_task_id, fake_storage_id, "", 100, fake_executor)
        perf_collection_handler()

        # Verify that collect telemetry is not called for deleted storage
        self.assertEqual(mock_collect_telemetry.call_count, 0)


================================================
FILE: delfin/tests/unit/task_manager/scheduler/test_scheduler.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import mock

from apscheduler.schedulers.background import BackgroundScheduler

from delfin import db
from delfin import test
from delfin.coordination import ConsistentHashing
from delfin.leader_election.distributor.task_distributor \
    import TaskDistributor
from delfin.task_manager.metrics_rpcapi import TaskAPI
from delfin.task_manager.scheduler import schedule_manager

FAKE_TASKS = [
    {
        'id': 1,
        'executor': 'node1'
    },
    {
        'id': 2,
        'executor': 'node2'
    },
    {
        'id': 3,
        'executor': 'node1'
    }
]


class TestScheduler(test.TestCase):
    def test_scheduler_manager_singleton(self):
        first_instance = schedule_manager.SchedulerManager().get_scheduler()
        self.assertIsInstance(first_instance, BackgroundScheduler)

        second_instance = schedule_manager.SchedulerManager().get_scheduler()
        self.assertIsInstance(second_instance, BackgroundScheduler)

        self.assertEqual(first_instance, second_instance)

    @mock.patch.object(BackgroundScheduler, 'start')
    def test_start(self, mock_scheduler_start):
        manager = schedule_manager.SchedulerManager()
        manager.start()
        self.assertEqual(mock_scheduler_start.call_count, 1)

        manager.start()
        self.assertEqual(mock_scheduler_start.call_count, 1)

    @mock.patch('tooz.coordination.get_coordinator', mock.Mock())
    @mock.patch.object(ConsistentHashing, 'get_task_executor')
    @mock.patch.object(TaskAPI, 'remove_job')
    @mock.patch.object(TaskDistributor, 'distribute_new_job')
    @mock.patch.object(db, 'task_get_all')
    def test_on_node_join(self, mock_task_get_all, mock_distribute_new_job,
                          mock_remove_job, mock_get_task_executor):
        node1_job_count = 0
        node2_job_count = 0
        for job in FAKE_TASKS:
            if job['executor'] == 'node1':
                node1_job_count += 1
            elif job['executor'] == 'node2':
                node2_job_count += 1

        mock_task_get_all.return_value = FAKE_TASKS
        mock_get_task_executor.return_value = 'node1'

        manager = schedule_manager.SchedulerManager()
        manager.on_node_join(mock.Mock(member_id=b'fake_member_id',
                                       group_id='node1'))
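        # Every task should be redistributed, and jobs previously owned by
        # node2 should also be removed from their old executor.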
        self.assertEqual(mock_task_get_all.call_count, 1)
        self.assertEqual(mock_distribute_new_job.call_count,
                         node1_job_count + node2_job_count)
        self.assertEqual(mock_remove_job.call_count, node2_job_count)
        self.assertEqual(mock_get_task_executor.call_count,
                         node1_job_count + node2_job_count)

    @mock.patch.object(TaskDistributor, 'distribute_new_job')
    @mock.patch.object(db, 'task_get_all')
    def test_on_node_leave(self, mock_task_get_all,
                           mock_distribute_new_job):
        mock_task_get_all.return_value = FAKE_TASKS

        manager = schedule_manager.SchedulerManager()
        manager.on_node_leave(mock.Mock(member_id=b'fake_member_id',
                                        group_id='fake_group_id'))

        self.assertEqual(mock_task_get_all.call_count, 1)
        self.assertEqual(mock_distribute_new_job.call_count, len(FAKE_TASKS))

    @mock.patch.object(TaskDistributor, 'distribute_new_job')
    @mock.patch.object(db, 'task_get_all')
    def test_recover_job(self, mock_task_get_all, mock_distribute_new_job):
        mock_task_get_all.return_value = FAKE_TASKS

        manager = schedule_manager.SchedulerManager()
        manager.recover_job()

        self.assertEqual(mock_task_get_all.call_count, 1)
        self.assertEqual(mock_distribute_new_job.call_count, len(FAKE_TASKS))


================================================
FILE: delfin/tests/unit/task_manager/test_alert_task.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
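
# These tests cover AlertSyncTask: sync_alerts lists alerts through the
# driver API, fills in storage attributes and dispatches them to exporters;
# clear_alerts clears each alert sequence number through the driver.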

from unittest import mock

from delfin import context
from delfin import db
from delfin import exception
from delfin import test
from delfin.common import constants
from delfin.task_manager.tasks import alerts

fake_storage = {
    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
    'name': 'fake_driver',
    'description': 'it is a fake driver.',
    'vendor': 'fake_vendor',
    'model': 'fake_model',
    'status': 'normal',
    'serial_number': '2102453JPN12KA000011',
    'firmware_version': '1.0.0',
    'location': 'HK',
    'total_capacity': 1024 * 1024,
    'used_capacity': 3126,
    'free_capacity': 1045449,
}

fake_alerts = [
    {
        'alert_id': '1050',
        'alert_name': 'SAMPLE_ALERT_NAME',
        'severity': constants.Severity.WARNING,
        'category': constants.Category.NOT_SPECIFIED,
        'type': constants.EventType.EQUIPMENT_ALARM,
        'sequence_number': 79,
        'description': 'Diagnostic event trace triggered.',
        'recovery_advice': 'NA',
        'resource_type': constants.DEFAULT_RESOURCE_TYPE,
        'location': 'Array id=000192601409,Component type=location1 '
                    'Group,Component name=comp1,Event source=symmetrix',
    },
    {
        'alert_id': '2000',
        'alert_name': 'SAMPLE_ALERT_NAME_2',
        'severity': constants.Severity.CRITICAL,
        'category': constants.Category.RECOVERY,
        'type': constants.EventType.PROCESSING_ERROR_ALARM,
        'sequence_number': 50,
        'description': 'This is a fake alert.',
        'recovery_advice': 'NA',
        'resource_type': constants.DEFAULT_RESOURCE_TYPE,
        'location': 'Array id=000192601409,Component type=location1 '
                    'Group,Component name=comp1,Event source=symmetrix',
    },
]


class TestAlertTask(test.TestCase):
    @mock.patch.object(db, 'storage_get',
                       mock.Mock(return_value=fake_storage))
    @mock.patch('delfin.exporter.base_exporter.AlertExporterManager.dispatch')
    @mock.patch('delfin.common.alert_util.fill_storage_attributes')
    @mock.patch('delfin.drivers.api.API.list_alerts')
    def test_sync_alerts(self, mock_list_alerts,
                         mock_fill_storage_attributes, mock_dispatch):
        task = alerts.AlertSyncTask()
        storage_id = fake_storage['id']

        # No alert
        mock_list_alerts.return_value = []
        task.sync_alerts(context, storage_id, None)
        self.assertEqual(db.storage_get.call_count, 1)
        self.assertEqual(mock_list_alerts.call_count, 1)
        self.assertEqual(mock_dispatch.call_count, 0)
        self.assertEqual(mock_fill_storage_attributes.call_count, 0)

        # Has alert
        mock_list_alerts.return_value = fake_alerts
        task.sync_alerts(context, storage_id, None)
        self.assertEqual(db.storage_get.call_count, 2)
        self.assertEqual(mock_list_alerts.call_count, 2)
        self.assertEqual(mock_dispatch.call_count, 1)
        self.assertEqual(mock_fill_storage_attributes.call_count,
                         len(fake_alerts))

    @mock.patch('delfin.drivers.api.API.clear_alert')
    def test_clear_alerts(self, mock_clear_alert):
        task = alerts.AlertSyncTask()
        storage_id = fake_storage['id']

        task.clear_alerts(context, storage_id, [])
        self.assertEqual(mock_clear_alert.call_count, 0)

        sequence_number_list = ['sequence_number_1', 'sequence_number_2']
        task.clear_alerts(context, storage_id, sequence_number_list)
        self.assertEqual(mock_clear_alert.call_count,
                         len(sequence_number_list))

        mock_clear_alert.side_effect = \
            exception.AccessInfoNotFound(storage_id)
        ret = task.clear_alerts(context, storage_id, sequence_number_list)
        self.assertEqual(ret, [])

        mock_clear_alert.side_effect = \
            exception.Invalid('Fake exception')
        ret = task.clear_alerts(context, storage_id, sequence_number_list)
        self.assertEqual(ret, sequence_number_list)


================================================
FILE: delfin/tests/unit/task_manager/test_resources.py
================================================
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import mock

from delfin.common import config  # noqa
from delfin.drivers import fake_storage
from delfin.task_manager.tasks import resources
from delfin.task_manager.tasks.resources import StorageDeviceTask
from delfin import test, context, coordination

storage = {
    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
    'name': 'fake_driver',
    'description': 'it is a fake driver.',
    'vendor': 'fake_vendor',
    'model': 'fake_model',
    'status': 'normal',
    'serial_number': '2102453JPN12KA000011',
    'firmware_version': '1.0.0',
    'location': 'HK',
    'total_capacity': 1024 * 1024,
    'used_capacity': 3126,
    'free_capacity': 1045449,
}

pools_list = [{
    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
    "name": "fake_pool_" + str(id),
    "storage_id": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
    "native_storage_pool_id": "fake_original_id_" + str(id),
    "description": "Fake Pool",
    "status": "normal",
    "total_capacity": 1024 * 1024,
    "used_capacity": 3126,
    "free_capacity": 1045449,
}
]

vols_list = [{
    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a340',
    "name": "fake_vol_" + str(id),
    "storage_id": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
    "description": "Fake Volume",
    "status": "normal",
    "native_volume_id": "fake_original_id_" + str(id),
    "wwn": "fake_wwn_" + str(id),
    "total_capacity": 1024 * 1024,
    "used_capacity": 3126,
    "free_capacity": 1045449,
}
]

ports_list = [{
    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
    "name": "fake_pool_" + str(id),
    "storage_id": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
    "native_port_id": "fake_original_id_" + str(id),
    "location": "location_25",
    "connection_status": "disconnected",
    "health_status": "normal",
    "type": "iscsi",
    "logical_type": "service",
    "speed": 1000,
    "max_speed": 7200,
    "native_parent_id": "parent_id",
    "wwn": "wwn",
    "mac_address": "mac_352",
    "ipv4": "127.0.0.1",
    "ipv4_mask": "255.255.255.0",
    "ipv6": "",
    "ipv6_mask": ""
}
]

controllers_list = [{
    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a222',
    "name": "fake_controller_" + str(id),
    "storage_id": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
    "native_controller_id": "fake_original_id_" + str(id),
    "status": "normal",
    "location": "loc_100",
    "soft_version": "ver_321",
    "cpu_info": "Intel Xenon",
    "memory_size": 200000,
}
]

disks_list = [{
    'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
    "name": "fake_pool_" + str(id),
    "storage_id": '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6',
    "native_disk_id": "fake_original_id_" + str(id),
    "serial_number": "serial_3299",
    "manufacturer": "Intel",
    "model": "model_4565",
    "firmware": "firmware_9541",
    "speed": 751,
    "capacity": 1074,
    "status": "offline",
    "physical_type": "sata",
    "logical_type": "cache",
    "health_score": 34,
    "native_disk_group_id": "",
}
]

quotas_list = [{
    "id": "251594c5-aac4-46ad-842f-3daca9176938",
    "native_quota_id": "fake_original_id_" + str(id),
    "name": "fake_qutoa_" + str(id),
    "storage_id": "793b26f9-6f16-4fd5-a6a2-d7453f050a41",
    "native_filesystem_id": "fake_filesystem_id_" + str(id),
    "native_qtree_id": "fake_qtree_id_" + str(id),
    "capacity_hard_limit": 1000,
    "capacity_soft_limit": 100,
    "file_hard_limit": 1000,
    "file_soft_limit": 100,
    "file_count": 10000,
    "used_capacity": 10000,
    "type": "user"
}
]

filesystems_list = [{
    "id": "fe760f5c-7b4c-42b2-b1ed-ecb4f0b6d6bc",
    "name": "fake_filesystem_" + str(id),
    "storage_id": "793b26f9-6f16-4fd5-a6a2-d7453f050a41",
    "native_filesystem_id": "fake_original_id_" + str(id),
    "status": "normal",
    "type": "thin",
    "security_mode": "unix",
    "total_capacity": 1055,
    "used_capacity": 812,
    "free_capacity": 243,
    "compressed": True,
    "deduplicated": False,
    "worm": "non_worm"
}
]

qtrees_list = [{
    "id": "251594c5-aac4-46ad-842f-3daca9176938",
    "name": "fake_qtree_" + str(id),
    "storage_id": "793b26f9-6f16-4fd5-a6a2-d7453f050a41",
    "native_qtree_id": "fake_original_id_" + str(id),
    "native_filesystem_id": "fake_filesystem_id_" + str(id),
    "path": "/",
    "security_mode": "native"
}
]

shares_list = [{
    "id": "4e62c66a-39ef-43f2-9690-e936ca876574",
    "name": "fake_share_" + str(id),
    "storage_id": "793b26f9-6f16-4fd5-a6a2-d7453f050a41",
    "native_share_id": "fake_original_id_" + str(id),
    "native_filesystem_id": "fake_filesystem_id_" + str(id),
    "native_qtree_id": "859",
    "protocol": "nfs",
    "path": "/"
}
]

storage_host_initiators_list = [{
    "id": "4e62c66a-39ef-43f2-9690-e936ca876574",
    "name": "storage_host_initiator_" + str(id),
    "description": "storage_host_initiator_" + str(id),
    "alias": "storage_host_initiator_" + str(id),
    "storage_id": "c5c91c98-91aa-40e6-85ac-37a1d3b32bda",
    "native_storage_host_initiator_id": "storage_host_initiator_" + str(id),
    "wwn": "wwn_" + str(id),
    "status": "Normal",
    "native_storage_host_id": "storage_host_" + str(id),
}
]

storage_hosts_list = [{
    "id": "4e62c66a-39ef-43f2-9690-e936ca876574",
    "name": "storage_host_" + str(id),
    "description": "storage_host_" + str(id),
    "storage_id": "c5c91c98-91aa-40e6-85ac-37a1d3b32bda",
    "native_storage_host_id": "storage_host_" + str(id),
    "os_type": "linux",
    "status": "Normal",
    "ip_address": "1.2.3.4"
}
]

storage_hg_list = [{
    "id": "4e62c66a-39ef-43f2-9690-e936ca876574",
    "name": "storage_host_group_" + str(id),
    "description": "storage_host_group_" + str(id),
    "storage_id": "c5c91c98-91aa-40e6-85ac-37a1d3b32bda",
    "native_storage_host_group_id": "storage_host_group_" + str(id),
}
]

storage_host_groups_list = {
    'storage_host_groups': storage_hg_list,
    'storage_host_grp_host_rels': ''
}

empty_shgs_list = {
    'storage_host_groups': list(),
    'storage_host_grp_host_rels': ''
}

pg_list = [{
    "id": "4e62c66a-39ef-43f2-9690-e936ca876574",
    "name": "port_group_" + str(id),
    "description": "port_group_" + str(id),
    "storage_id": "c5c91c98-91aa-40e6-85ac-37a1d3b32bda",
    "native_port_group_id": "port_group_" + str(id),
}
]

port_groups_list = {
    "port_groups": pg_list,
    "port_grp_port_rels": '',
}

empty_port_groups_list = {
    "port_groups": list(),
    "port_grp_port_rels": '',
}

vg_list = [{
    "id": "4e62c66a-39ef-43f2-9690-e936ca876574",
    "name": "volume_group_" + str(id),
    "description": "volume_group_" + str(id),
    "storage_id": "c5c91c98-91aa-40e6-85ac-37a1d3b32bda",
    "native_volume_group_id": "volume_group_" + str(id),
}
]

volume_groups_list = {
    'volume_groups': vg_list,
    'vol_grp_vol_rels': ''
}

empty_volume_groups_list = {
    'volume_groups': list(),
    'vol_grp_vol_rels': ''
}

masking_views_list = [{
    "id": "4e62c66a-39ef-43f2-9690-e936ca876574",
    "name": "masking_view_" + str(id),
    "description": "masking_view_" + str(id),
    "storage_id": "c5c91c98-91aa-40e6-85ac-37a1d3b32bda",
    "native_masking_view_id": "masking_view_" + str(id),
}
]


class TestStorageDeviceTask(test.TestCase):
    def setUp(self):
        super(TestStorageDeviceTask, self).setUp()
        self.driver_api = mock.Mock()
        self.task_manager = StorageDeviceTask(
            context, "12c2d52f-01bc-41f5-b73f-7abf6f38a2a6")
        self.mock_object(self.task_manager, 'driver_api', self.driver_api)

    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
    @mock.patch('delfin.drivers.api.API.get_storage')
    @mock.patch('delfin.db.storage_update')
    @mock.patch('delfin.db.storage_get')
    @mock.patch('delfin.db.storage_delete')
    @mock.patch('delfin.db.access_info_delete')
    @mock.patch('delfin.db.alert_source_delete')
    def test_sync_successful(self, alert_source_delete, access_info_delete,
                             mock_storage_delete, mock_storage_get,
                             mock_storage_update, mock_get_storage,
                             get_lock):
        storage_obj = resources.StorageDeviceTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        storage_obj.sync()
        self.assertTrue(get_lock.called)
        self.assertTrue(mock_storage_get.called)
        self.assertTrue(mock_storage_delete.called)
        self.assertTrue(access_info_delete.called)
        self.assertTrue(alert_source_delete.called)
        self.assertTrue(mock_storage_update.called)
        mock_get_storage.assert_called_with(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')

        fake_storage_obj = fake_storage.FakeStorageDriver()
        mock_get_storage.return_value = fake_storage_obj.get_storage(context)
        storage_obj.sync()

    @mock.patch('delfin.db.storage_delete')
    @mock.patch('delfin.db.alert_source_delete')
    def test_successful_remove(self, mock_alert_del, mock_strg_del):
        storage_obj = resources.StorageDeviceTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        storage_obj.remove()

        mock_strg_del.assert_called_with(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        mock_alert_del.assert_called_with(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')


class TestStoragePoolTask(test.TestCase):
    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
    @mock.patch('delfin.drivers.api.API.list_storage_pools')
    @mock.patch('delfin.db.storage_pool_get_all')
    @mock.patch('delfin.db.storage_pools_delete')
    @mock.patch('delfin.db.storage_pools_update')
    @mock.patch('delfin.db.storage_pools_create')
    def test_sync_successful(self, mock_pool_create, mock_pool_update,
                             mock_pool_del, mock_pool_get_all,
                             mock_list_pools, get_lock):
        pool_obj = resources.StoragePoolTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        pool_obj.sync()
        self.assertTrue(mock_list_pools.called)
        self.assertTrue(mock_pool_get_all.called)
        self.assertTrue(get_lock.called)

        # collect the pools from fake_storage
        fake_storage_obj = fake_storage.FakeStorageDriver()

        # add the new pool to DB
        mock_list_pools.return_value = fake_storage_obj.list_storage_pools(
            context)
        mock_pool_get_all.return_value = list()
        pool_obj.sync()
        self.assertTrue(mock_pool_create.called)

        # update the new pool of DB
        mock_list_pools.return_value = pools_list
        mock_pool_get_all.return_value = pools_list
        pool_obj.sync()
        self.assertTrue(mock_pool_update.called)

        # delete the new pool to DB
        mock_list_pools.return_value = list()
        mock_pool_get_all.return_value = pools_list
        pool_obj.sync()
        self.assertTrue(mock_pool_del.called)

    @mock.patch('delfin.db.storage_pool_delete_by_storage')
    def test_remove(self, mock_pool_del):
        pool_obj = resources.StoragePoolTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        pool_obj.remove()
        self.assertTrue(mock_pool_del.called)


class TestStorageVolumeTask(test.TestCase):
    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
    @mock.patch('delfin.drivers.api.API.list_volumes')
    @mock.patch('delfin.db.volume_get_all')
    @mock.patch('delfin.db.volumes_delete')
    @mock.patch('delfin.db.volumes_update')
    @mock.patch('delfin.db.volumes_create')
    def test_sync_successful(self, mock_vol_create, mock_vol_update,
                             mock_vol_del, mock_vol_get_all,
                             mock_list_vols, get_lock):
        vol_obj = resources.StorageVolumeTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        vol_obj.sync()
        self.assertTrue(mock_list_vols.called)
        self.assertTrue(mock_vol_get_all.called)
        self.assertTrue(get_lock.called)

        # collect the volumes from fake_storage
        fake_storage_obj = fake_storage.FakeStorageDriver()

        # add the volumes to DB
        mock_list_vols.return_value = fake_storage_obj.list_volumes(context)
        mock_vol_get_all.return_value = list()
        vol_obj.sync()
        self.assertTrue(mock_vol_create.called)

        # update the volumes to DB
        mock_list_vols.return_value = vols_list
        mock_vol_get_all.return_value = vols_list
        vol_obj.sync()
        self.assertTrue(mock_vol_update.called)

        # delete the volumes to DB
        mock_list_vols.return_value = list()
        mock_vol_get_all.return_value = vols_list
        vol_obj.sync()
        self.assertTrue(mock_vol_del.called)

    @mock.patch('delfin.db.volume_delete_by_storage')
    def test_remove(self, mock_vol_del):
        vol_obj = resources.StorageVolumeTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        vol_obj.remove()
        self.assertTrue(mock_vol_del.called)


class TestStoragecontrollerTask(test.TestCase):
    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
    @mock.patch('delfin.drivers.api.API.list_controllers')
    @mock.patch('delfin.db.controller_get_all')
    @mock.patch('delfin.db.controllers_delete')
    @mock.patch('delfin.db.controllers_update')
    @mock.patch('delfin.db.controllers_create')
    def test_sync_successful(self, mock_controller_create,
                             mock_controller_update, mock_controller_del,
                             mock_controller_get_all, mock_list_controllers,
                             get_lock):
        controller_obj = resources.StorageControllerTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        controller_obj.sync()
        self.assertTrue(mock_list_controllers.called)
        self.assertTrue(mock_controller_get_all.called)
        self.assertTrue(get_lock.called)

        # collect the controllers from fake_storage
        fake_storage_obj = fake_storage.FakeStorageDriver()

        # add the new controller to DB
        mock_list_controllers.return_value = \
            fake_storage_obj.list_controllers(context)
        mock_controller_get_all.return_value = list()
        controller_obj.sync()
        self.assertTrue(mock_controller_create.called)

        # update the new controller of DB
        mock_list_controllers.return_value = controllers_list
        mock_controller_get_all.return_value = controllers_list
        controller_obj.sync()
        self.assertTrue(mock_controller_update.called)

        # delete the new controller to DB
        mock_list_controllers.return_value = list()
        mock_controller_get_all.return_value = controllers_list
        controller_obj.sync()
        self.assertTrue(mock_controller_del.called)

    @mock.patch('delfin.db.controller_delete_by_storage')
    def test_remove(self, mock_controller_del):
        controller_obj = resources.StorageControllerTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        controller_obj.remove()
        self.assertTrue(mock_controller_del.called)


class TestStoragePortTask(test.TestCase):
    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
    @mock.patch('delfin.drivers.api.API.list_ports')
    @mock.patch('delfin.db.port_get_all')
    @mock.patch('delfin.db.ports_delete')
    @mock.patch('delfin.db.ports_update')
    @mock.patch('delfin.db.ports_create')
    def test_sync_successful(self, mock_port_create, mock_port_update,
                             mock_port_del, mock_port_get_all,
                             mock_list_ports, get_lock):
        port_obj = resources.StoragePortTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        port_obj.sync()
        self.assertTrue(mock_list_ports.called)
        self.assertTrue(mock_port_get_all.called)
        self.assertTrue(get_lock.called)

        # collect the ports from fake_storage
        fake_storage_obj = fake_storage.FakeStorageDriver()

        # add the ports to DB
        mock_list_ports.return_value = fake_storage_obj.list_ports(context)
        mock_port_get_all.return_value = list()
        port_obj.sync()
        self.assertTrue(mock_port_create.called)

        # update the ports to DB
        mock_list_ports.return_value = ports_list
        mock_port_get_all.return_value = ports_list
        port_obj.sync()
        self.assertTrue(mock_port_update.called)

        # delete the ports to DB
        mock_list_ports.return_value = list()
        mock_port_get_all.return_value = ports_list
        port_obj.sync()
        self.assertTrue(mock_port_del.called)

    @mock.patch('delfin.db.port_delete_by_storage')
    def test_remove(self, mock_port_del):
        port_obj = resources.StoragePortTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        port_obj.remove()
        self.assertTrue(mock_port_del.called)


class TestStorageDiskTask(test.TestCase):
    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
    @mock.patch('delfin.drivers.api.API.list_disks')
    @mock.patch('delfin.db.disk_get_all')
    @mock.patch('delfin.db.disks_delete')
    @mock.patch('delfin.db.disks_update')
    @mock.patch('delfin.db.disks_create')
    def test_sync_successful(self, mock_disk_create, mock_disk_update,
                             mock_disk_del, mock_disk_get_all,
                             mock_list_disks, get_lock):
        disk_obj = resources.StorageDiskTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        disk_obj.sync()
        self.assertTrue(mock_list_disks.called)
        self.assertTrue(mock_disk_get_all.called)
        self.assertTrue(get_lock.called)

        # collect the disks from fake_storage
        fake_storage_obj = fake_storage.FakeStorageDriver()

        # add the disks to DB
        mock_list_disks.return_value = fake_storage_obj.list_disks(context)
        mock_disk_get_all.return_value = list()
        disk_obj.sync()
        self.assertTrue(mock_disk_create.called)

        # update the disks to DB
        mock_list_disks.return_value = disks_list
        mock_disk_get_all.return_value = disks_list
        disk_obj.sync()
        self.assertTrue(mock_disk_update.called)

        # delete the disks to DB
        mock_list_disks.return_value = list()
        mock_disk_get_all.return_value = disks_list
        disk_obj.sync()
        self.assertTrue(mock_disk_del.called)

    @mock.patch('delfin.db.disk_delete_by_storage')
    def test_remove(self, mock_disk_del):
        disk_obj = resources.StorageDiskTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        disk_obj.remove()
        self.assertTrue(mock_disk_del.called)


class TestStorageQuotaTask(test.TestCase):
    # @mock.patch('delfin.drivers.api.API.list_quotas', 'get_lock')
    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
    @mock.patch('delfin.drivers.api.API.list_quotas')
    @mock.patch('delfin.db.quota_get_all')
    @mock.patch('delfin.db.quotas_delete')
    @mock.patch('delfin.db.quotas_update')
    @mock.patch('delfin.db.quotas_create')
    def test_sync_successful(self, mock_quota_create, mock_quota_update,
                             mock_quota_del, mock_quota_get_all,
                             mock_list_quotas, get_lock):
        quota_obj = resources.StorageQuotaTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        quota_obj.sync()
        self.assertTrue(mock_list_quotas.called)
        self.assertTrue(mock_quota_get_all.called)
        self.assertTrue(get_lock.called)

        # collect the quotas from fake_storage
        fake_storage_obj = fake_storage.FakeStorageDriver()

        # add the quotas to DB
        mock_list_quotas.return_value = \
            fake_storage_obj.list_quotas(context)
        mock_quota_get_all.return_value = list()
        quota_obj.sync()
        self.assertTrue(mock_quota_create.called)

        # update the quotas to DB
        mock_list_quotas.return_value = quotas_list
        mock_quota_get_all.return_value = quotas_list
        quota_obj.sync()
        self.assertTrue(mock_quota_update.called)

        # delete the quotas to DB
        mock_list_quotas.return_value = list()
        mock_quota_get_all.return_value = quotas_list
        quota_obj.sync()
        self.assertTrue(mock_quota_del.called)

    @mock.patch('delfin.db.quota_delete_by_storage')
    def test_remove(self, mock_quota_del):
        quota_obj = resources.StorageQuotaTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        quota_obj.remove()
        self.assertTrue(mock_quota_del.called)


class TestStorageFilesystemTask(test.TestCase):
    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
    @mock.patch('delfin.drivers.api.API.list_filesystems')
    @mock.patch('delfin.db.filesystem_get_all')
    @mock.patch('delfin.db.filesystems_delete')
    @mock.patch('delfin.db.filesystems_update')
    @mock.patch('delfin.db.filesystems_create')
    def test_sync_successful(self, mock_filesystem_create,
                             mock_filesystem_update, mock_filesystem_del,
                             mock_filesystem_get_all, mock_list_filesystems,
                             get_lock):
        filesystem_obj = resources.StorageFilesystemTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        filesystem_obj.sync()
        self.assertTrue(mock_list_filesystems.called)
        self.assertTrue(mock_filesystem_get_all.called)
        self.assertTrue(get_lock.called)

        # collect the filesystems from fake_storage
        fake_storage_obj = fake_storage.FakeStorageDriver()

        # add the filesystems to DB
        mock_list_filesystems.return_value = \
            fake_storage_obj.list_filesystems(context)
        mock_filesystem_get_all.return_value = list()
        filesystem_obj.sync()
        self.assertTrue(mock_filesystem_create.called)

        # update the filesystems to DB
        mock_list_filesystems.return_value = filesystems_list
        mock_filesystem_get_all.return_value = filesystems_list
        filesystem_obj.sync()
        self.assertTrue(mock_filesystem_update.called)

        # delete the filesystems to DB
        mock_list_filesystems.return_value = list()
        mock_filesystem_get_all.return_value = filesystems_list
        filesystem_obj.sync()
        self.assertTrue(mock_filesystem_del.called)

    @mock.patch('delfin.db.filesystem_delete_by_storage')
    def test_remove(self, mock_filesystem_del):
        filesystem_obj = resources.StorageFilesystemTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        filesystem_obj.remove()
        self.assertTrue(mock_filesystem_del.called)


class TestStorageQtreeTask(test.TestCase):
    # @mock.patch('delfin.drivers.api.API.list_qtrees', 'get_lock')
    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
    @mock.patch('delfin.drivers.api.API.list_qtrees')
    @mock.patch('delfin.db.qtree_get_all')
    @mock.patch('delfin.db.qtrees_delete')
    @mock.patch('delfin.db.qtrees_update')
    @mock.patch('delfin.db.qtrees_create')
    def test_sync_successful(self, mock_qtree_create, mock_qtree_update,
                             mock_qtree_del, mock_qtree_get_all,
                             mock_list_qtrees, get_lock):
        qtree_obj = resources.StorageQtreeTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        qtree_obj.sync()
        self.assertTrue(mock_list_qtrees.called)
        self.assertTrue(mock_qtree_get_all.called)
        self.assertTrue(get_lock.called)

        # collect the qtrees from fake_storage
        fake_storage_obj = fake_storage.FakeStorageDriver()

        # add the qtrees to DB
        mock_list_qtrees.return_value = \
            fake_storage_obj.list_qtrees(context)
        mock_qtree_get_all.return_value = list()
        qtree_obj.sync()
        self.assertTrue(mock_qtree_create.called)

        # update the qtrees to DB
        mock_list_qtrees.return_value = qtrees_list
        mock_qtree_get_all.return_value = qtrees_list
        qtree_obj.sync()
        self.assertTrue(mock_qtree_update.called)

        # delete the qtrees to DB
        mock_list_qtrees.return_value = list()
        mock_qtree_get_all.return_value = qtrees_list
        qtree_obj.sync()
        self.assertTrue(mock_qtree_del.called)

    @mock.patch('delfin.db.qtree_delete_by_storage')
    def test_remove(self, mock_qtree_del):
        qtree_obj = resources.StorageQtreeTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        qtree_obj.remove()
        self.assertTrue(mock_qtree_del.called)


class TestStorageShareTask(test.TestCase):
    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
    @mock.patch('delfin.drivers.api.API.list_shares')
    @mock.patch('delfin.db.share_get_all')
    @mock.patch('delfin.db.shares_delete')
    @mock.patch('delfin.db.shares_update')
    @mock.patch('delfin.db.shares_create')
    def test_sync_successful(self, mock_share_create, mock_share_update,
                             mock_share_del, mock_share_get_all,
                             mock_list_shares, get_lock):
        share_obj = resources.StorageShareTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        share_obj.sync()
        self.assertTrue(mock_list_shares.called)
        self.assertTrue(mock_share_get_all.called)
        self.assertTrue(get_lock.called)

        # collect the shares from fake_storage
        fake_storage_obj = fake_storage.FakeStorageDriver()

        # add the shares to DB
        mock_list_shares.return_value = fake_storage_obj.list_shares(context)
        mock_share_get_all.return_value = list()
        share_obj.sync()
        self.assertTrue(mock_share_create.called)

        # update the shares to DB
        mock_list_shares.return_value = shares_list
        mock_share_get_all.return_value = shares_list
        share_obj.sync()
        self.assertTrue(mock_share_update.called)

        # delete the shares to DB
        mock_list_shares.return_value = list()
        mock_share_get_all.return_value = shares_list
        share_obj.sync()
        self.assertTrue(mock_share_del.called)

    @mock.patch('delfin.db.share_delete_by_storage')
    def test_remove(self, mock_share_del):
        share_obj = resources.StorageShareTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        share_obj.remove()
        self.assertTrue(mock_share_del.called)


class TestStorageHostInitiatorTask(test.TestCase):
    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
    @mock.patch('delfin.drivers.api.API.list_storage_host_initiators')
    @mock.patch('delfin.db.storage_host_initiators_delete_by_storage')
    @mock.patch('delfin.db.storage_host_initiators_create')
    def test_sync_successful(self, mock_storage_host_initiator_create,
                             mock_storage_host_initiator_delete_by_storage,
                             mock_list_storage_host_initiators, get_lock):
        storage_host_initiator_obj = resources.StorageHostInitiatorTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')

        # Collect the storage host initiators from fake_storage
        fake_storage_obj = fake_storage.FakeStorageDriver()

        # Add the storage host initiators to DB
        mock_list_storage_host_initiators.return_value \
            = fake_storage_obj.list_storage_host_initiators(context)
        storage_host_initiator_obj.sync()
        self.assertTrue(mock_storage_host_initiator_delete_by_storage.called)
        self.assertTrue(mock_storage_host_initiator_create.called)

    @mock.patch('delfin.db.storage_host_initiators_delete_by_storage')
    def test_remove(self, mock_storage_host_initiators_del):
        storage_host_initiator_obj = resources.StorageHostInitiatorTask(
            context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda')
        storage_host_initiator_obj.remove()
        self.assertTrue(mock_storage_host_initiators_del.called)


class TestStorageHostTask(test.TestCase):
    @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock')
    @mock.patch('delfin.drivers.api.API.list_storage_hosts')
    @mock.patch('delfin.db.storage_hosts_get_all')
    @mock.patch('delfin.db.storage_hosts_delete')
    @mock.patch('delfin.db.storage_hosts_update')
    @mock.patch('delfin.db.storage_hosts_create')
    def test_sync_successful(self, mock_storage_host_create,
                             mock_storage_host_update,
                             mock_storage_host_del,
                             mock_storage_hosts_get_all,
                             mock_list_storage_hosts, get_lock):
storage_host_obj = resources.StorageHostTask( context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') storage_host_obj.sync() self.assertTrue(mock_list_storage_hosts.called) self.assertTrue(mock_storage_hosts_get_all.called) self.assertTrue(get_lock.called) # Collect the storage hosts from fake_storage fake_storage_obj = fake_storage.FakeStorageDriver() # Add the storage hosts to DB mock_list_storage_hosts.return_value \ = fake_storage_obj.list_storage_hosts(context) mock_storage_hosts_get_all.return_value = list() storage_host_obj.sync() self.assertTrue(mock_storage_host_create.called) # Update the storage hosts to DB mock_list_storage_hosts.return_value \ = storage_hosts_list mock_storage_hosts_get_all.return_value \ = storage_hosts_list storage_host_obj.sync() self.assertTrue(mock_storage_host_update.called) # Delete the storage hosts to DB mock_list_storage_hosts.return_value = list() mock_storage_hosts_get_all.return_value \ = storage_hosts_list storage_host_obj.sync() self.assertTrue(mock_storage_host_del.called) @mock.patch('delfin.db.storage_hosts_delete_by_storage') def test_remove(self, mock_storage_hosts_del): storage_host_obj = resources.StorageHostTask( context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') storage_host_obj.remove() self.assertTrue(mock_storage_hosts_del.called) class TestStorageHostGroupTask(test.TestCase): @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock') @mock.patch('delfin.drivers.api.API.list_storage_host_groups') @mock.patch('delfin.db.storage_host_groups_get_all') @mock.patch('delfin.db.storage_host_groups_delete') @mock.patch('delfin.db.storage_host_groups_update') @mock.patch('delfin.db.storage_host_groups_create') def test_sync_successful(self, mock_storage_host_group_create, mock_storage_host_group_update, mock_storage_host_group_del, mock_storage_host_groups_get_all, mock_list_storage_host_groups, get_lock): storage_host_group_obj = resources.StorageHostGroupTask( context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') storage_host_group_obj.sync() self.assertTrue(mock_list_storage_host_groups.called) self.assertTrue(mock_storage_host_groups_get_all.called) self.assertTrue(get_lock.called) # Collect the storage host groups from fake_storage fake_storage_obj = fake_storage.FakeStorageDriver() # Add the storage host groups to DB mock_list_storage_host_groups.return_value \ = fake_storage_obj.list_storage_host_groups(context) mock_storage_host_groups_get_all.return_value = list() storage_host_group_obj.sync() self.assertTrue(mock_storage_host_group_create.called) # Update the storage host groups to DB mock_list_storage_host_groups.return_value \ = storage_host_groups_list mock_storage_host_groups_get_all.return_value \ = storage_hg_list storage_host_group_obj.sync() self.assertTrue(mock_storage_host_group_update.called) # Delete the storage host groups to DB mock_list_storage_host_groups.return_value = empty_shgs_list mock_storage_host_groups_get_all.return_value \ = storage_hg_list storage_host_group_obj.sync() self.assertTrue(mock_storage_host_group_del.called) @mock.patch('delfin.db.storage_host_groups_delete_by_storage') def test_remove(self, mock_storage_host_groups_del): storage_host_group_obj = resources.StorageHostGroupTask( context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') storage_host_group_obj.remove() self.assertTrue(mock_storage_host_groups_del.called) class TestVolumeGroupTask(test.TestCase): @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock') @mock.patch('delfin.drivers.api.API.list_volume_groups') 
@mock.patch('delfin.db.volume_groups_get_all') @mock.patch('delfin.db.volume_groups_delete') @mock.patch('delfin.db.volume_groups_update') @mock.patch('delfin.db.volume_groups_create') def test_sync_successful(self, mock_volume_group_create, mock_volume_group_update, mock_volume_group_del, mock_volume_groups_get_all, mock_list_volume_groups, get_lock): volume_group_obj = resources.VolumeGroupTask( context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') volume_group_obj.sync() self.assertTrue(mock_list_volume_groups.called) self.assertTrue(mock_volume_groups_get_all.called) self.assertTrue(get_lock.called) # Collect the volume groups from fake_storage fake_storage_obj = fake_storage.FakeStorageDriver() # Add the volume groups to DB mock_list_volume_groups.return_value \ = fake_storage_obj.list_volume_groups(context) mock_volume_groups_get_all.return_value = list() volume_group_obj.sync() self.assertTrue(mock_volume_group_create.called) # Update the volume groups to DB mock_list_volume_groups.return_value \ = volume_groups_list mock_volume_groups_get_all.return_value \ = vg_list volume_group_obj.sync() self.assertTrue(mock_volume_group_update.called) # Delete the volume groups to DB mock_list_volume_groups.return_value = empty_volume_groups_list mock_volume_groups_get_all.return_value \ = vg_list volume_group_obj.sync() self.assertTrue(mock_volume_group_del.called) @mock.patch('delfin.db.volume_groups_delete_by_storage') def test_remove(self, mock_volume_groups_del): volume_group_obj = resources.VolumeGroupTask( context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') volume_group_obj.remove() self.assertTrue(mock_volume_groups_del.called) class TestPortGroupTask(test.TestCase): @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock') @mock.patch('delfin.drivers.api.API.list_port_groups') @mock.patch('delfin.db.port_groups_get_all') @mock.patch('delfin.db.port_groups_delete') @mock.patch('delfin.db.port_groups_update') @mock.patch('delfin.db.port_groups_create') def test_sync_successful(self, mock_port_group_create, mock_port_group_update, mock_port_group_del, mock_port_groups_get_all, mock_list_port_groups, get_lock): ctxt = context.get_admin_context() port_group_obj = resources.PortGroupTask( ctxt, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') port_group_obj.sync() self.assertTrue(mock_list_port_groups.called) self.assertTrue(mock_port_groups_get_all.called) self.assertTrue(get_lock.called) # Collect the storage host groups from fake_storage fake_storage_obj = fake_storage.FakeStorageDriver() # Add the storage host groups to DB mock_list_port_groups.return_value \ = fake_storage_obj.list_port_groups(context) mock_port_groups_get_all.return_value = list() port_group_obj.sync() self.assertTrue(mock_port_group_create.called) # Update the storage host groups to DB mock_list_port_groups.return_value \ = port_groups_list mock_port_groups_get_all.return_value \ = pg_list port_group_obj.sync() self.assertTrue(mock_port_group_update.called) # Delete the storage host groups to DB mock_list_port_groups.return_value = empty_port_groups_list mock_port_groups_get_all.return_value \ = pg_list port_group_obj.sync() self.assertTrue(mock_port_group_del.called) @mock.patch('delfin.db.port_groups_delete_by_storage') def test_remove(self, mock_port_groups_del): port_group_obj = resources.PortGroupTask( context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') port_group_obj.remove() self.assertTrue(mock_port_groups_del.called) class TestMaskingViewTask(test.TestCase): @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock') 
@mock.patch('delfin.drivers.api.API.list_masking_views') @mock.patch('delfin.db.masking_views_get_all') @mock.patch('delfin.db.masking_views_delete') @mock.patch('delfin.db.masking_views_update') @mock.patch('delfin.db.masking_views_create') def test_sync_successful(self, mock_masking_view_create, mock_masking_view_update, mock_masking_view_del, mock_masking_views_get_all, mock_list_masking_views, get_lock): cntxt = context.get_admin_context() masking_view_obj = resources.MaskingViewTask( cntxt, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') masking_view_obj.sync() self.assertTrue(mock_list_masking_views.called) self.assertTrue(mock_masking_views_get_all.called) self.assertTrue(get_lock.called) # Collect the volume groups from fake_storage fake_storage_obj = fake_storage.FakeStorageDriver() # Add the volume groups to DB mock_list_masking_views.return_value \ = fake_storage_obj.list_masking_views(context) mock_masking_views_get_all.return_value = list() masking_view_obj.sync() self.assertTrue(mock_masking_view_create.called) # Update the volume groups to DB mock_list_masking_views.return_value \ = masking_views_list mock_masking_views_get_all.return_value \ = masking_views_list masking_view_obj.sync() self.assertTrue(mock_masking_view_update.called) # Delete the volume groups to DB mock_list_masking_views.return_value = list() mock_masking_views_get_all.return_value \ = masking_views_list masking_view_obj.sync() self.assertTrue(mock_masking_view_del.called) @mock.patch('delfin.db.masking_views_delete_by_storage') def test_remove(self, mock_masking_views_del): masking_view_obj = resources.MaskingViewTask( context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') masking_view_obj.remove() self.assertTrue(mock_masking_views_del.called) ================================================ FILE: delfin/tests/unit/task_manager/test_telemetry.py ================================================ # Copyright 2021 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
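The resource-task tests above all drive `sync()` through the same three passes by varying what the driver returns against what the DB holds. A minimal sketch of the reconciliation logic those passes assume (hypothetical helper and key name, not delfin's actual implementation):

```python
# Sketch of the create/update/delete split exercised by each
# test_sync_successful above. Resources are dicts; 'native_id' is an
# illustrative stand-in for the per-resource native identifier column.
def split_for_sync(driver_list, db_list, key='native_id'):
    driver_ids = {r[key] for r in driver_list}
    db_ids = {r[key] for r in db_list}
    to_create = [r for r in driver_list if r[key] not in db_ids]
    to_update = [r for r in driver_list if r[key] in db_ids]
    to_delete = [r for r in db_list if r[key] not in driver_ids]
    return to_create, to_update, to_delete
```

An empty DB list routes everything to `to_create`, identical lists route to `to_update`, and an empty driver list routes to `to_delete`, which is exactly why each test asserts the create, update, and delete mocks in that order.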
from unittest import mock from delfin import context from delfin import db from delfin import exception from delfin import test from delfin.task_manager.tasks import telemetry from delfin.task_manager.metrics_manager import MetricsTaskManager from delfin.task_manager.scheduler.schedulers.telemetry.job_handler \ import JobHandler, FailedJobHandler from apscheduler.schedulers.background import BackgroundScheduler from delfin.task_manager.subprocess_rpcapi import SubprocessAPI fake_storage = { 'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6', 'name': 'fake_driver', 'description': 'it is a fake driver.', 'vendor': 'fake_vendor', 'model': 'fake_model', 'status': 'normal', 'serial_number': '2102453JPN12KA000011', 'firmware_version': '1.0.0', 'location': 'HK', 'total_capacity': 1024 * 1024, 'used_capacity': 3126, 'free_capacity': 1045449, } class TestPerformanceCollectionTask(test.TestCase): @mock.patch.object(db, 'storage_get', mock.Mock(return_value=fake_storage)) @mock.patch('delfin.exporter.base_exporter.PerformanceExporterManager' '.dispatch') @mock.patch('delfin.drivers.api.API.collect_perf_metrics') def test_performance_collection_success(self, mock_collect_perf_metrics, mock_dispatch): perf_task = telemetry.PerformanceCollectionTask() storage_id = fake_storage['id'] mock_collect_perf_metrics.return_value = [] perf_task.collect(context, storage_id, [], 100800, 100900) self.assertEqual(mock_collect_perf_metrics.call_count, 1) self.assertEqual(mock_dispatch.call_count, 1) @mock.patch.object(db, 'storage_get', mock.Mock(return_value=fake_storage)) @mock.patch('logging.LoggerAdapter.error') @mock.patch('delfin.exporter.base_exporter.PerformanceExporterManager' '.dispatch') @mock.patch('delfin.drivers.api.API.collect_perf_metrics') def test_performance_collection_failure(self, mock_collect_perf_metrics, mock_dispatch, mock_log_error): perf_task = telemetry.PerformanceCollectionTask() storage_id = fake_storage['id'] # No alert mock_collect_perf_metrics.return_value = [] mock_collect_perf_metrics.side_effect = \ exception.Invalid('Fake exception') perf_task.collect(context, storage_id, [], 100800, 100900) # Verify that dispatch is not done and error is logged # when collect metric fails self.assertEqual(mock_dispatch.call_count, 0) self.assertEqual(mock_log_error.call_count, 1) @mock.patch.object(SubprocessAPI, 'assign_job_local') @mock.patch.object(db, 'task_get') @mock.patch.object(JobHandler, 'schedule_job') @mock.patch.object(MetricsTaskManager, 'schedule_boot_jobs') @mock.patch.object(MetricsTaskManager, 'create_process') def test_metric_manager_assign_job(self, mock_create, mock_boot_job, mock_job_schedule, mock_db, mock_subprocess_api): mock_db.return_value = { 'storage_id': 'storage_id1', 'args': 'args', 'interval': 10, } mock_create.return_value = None mock_boot_job.return_value = None mock_job_schedule.return_value = None mock_subprocess_api.return_value = None mgr = MetricsTaskManager() mgr.enable_sub_process = False mgr.assign_job('context', 'task_id1', 'host1') self.assertEqual(mock_job_schedule.call_count, 1) mgr.enable_sub_process = True mgr.scheduler = BackgroundScheduler() mgr.scheduler.start() mgr.assign_job('context', 'task_id1', 'host1') self.assertEqual(mock_job_schedule.call_count, 1) self.assertEqual(mock_subprocess_api.call_count, 1) @mock.patch.object(SubprocessAPI, 'remove_job_local') @mock.patch.object(db, 'task_get') @mock.patch.object(JobHandler, 'remove_job') @mock.patch.object(MetricsTaskManager, 'schedule_boot_jobs') @mock.patch.object(MetricsTaskManager, 
'create_process') def test_metric_manager_remove_job(self, mock_create, mock_boot_job, mock_job_schedule, mock_db, mock_subprocess_api): mock_db.return_value = { 'storage_id': 'storage_id1', 'args': 'args', 'interval': 10, } mock_create.return_value = None mock_boot_job.return_value = None mock_job_schedule.return_value = None mock_subprocess_api.return_value = None mgr = MetricsTaskManager() mgr.enable_sub_process = False mgr.remove_job('context', 'task_id1', 'host1') self.assertEqual(mock_job_schedule.call_count, 1) mgr.enable_sub_process = True mgr.executor_map = { 'host1': { "storages": ['storage_id1'], } } mgr.scheduler = BackgroundScheduler() mgr.scheduler.start() mgr.remove_job('context', 'task_id1', 'host1') self.assertEqual(mock_job_schedule.call_count, 1) self.assertEqual(mock_subprocess_api.call_count, 1) @mock.patch.object(SubprocessAPI, 'assign_failed_job_local') @mock.patch.object(db, 'failed_task_get') @mock.patch.object(FailedJobHandler, 'schedule_failed_job') @mock.patch.object(MetricsTaskManager, 'schedule_boot_jobs') @mock.patch.object(MetricsTaskManager, 'create_process') @mock.patch.object(MetricsTaskManager, 'get_local_executor') def test_metric_manager_assign_failed_job(self, mock_executor, mock_create, mock_boot_job, mock_job_schedule, mock_db, mock_subprocess_api): mock_db.return_value = { 'storage_id': 'storage_id1', 'args': 'args', 'interval': 10, } mock_create.return_value = None mock_boot_job.return_value = None mock_job_schedule.return_value = None mock_subprocess_api.return_value = None mock_executor.return_value = None mgr = MetricsTaskManager() mgr.enable_sub_process = False mgr.assign_failed_job('context', 'task_id1', 'host1') self.assertEqual(mock_job_schedule.call_count, 1) mgr.enable_sub_process = True mgr.scheduler = BackgroundScheduler() mgr.scheduler.start() mgr.assign_failed_job('context', 'task_id1', 'host1') self.assertEqual(mock_job_schedule.call_count, 1) self.assertEqual(mock_subprocess_api.call_count, 1) @mock.patch.object(SubprocessAPI, 'remove_failed_job_local') @mock.patch.object(db, 'failed_task_get') @mock.patch.object(FailedJobHandler, 'remove_failed_job') @mock.patch.object(MetricsTaskManager, 'schedule_boot_jobs') @mock.patch.object(MetricsTaskManager, 'create_process') def test_metric_manager_remove_failed_job(self, mock_create, mock_boot_job, mock_job_schedule, mock_db, mock_subprocess_api): mock_db.return_value = { 'storage_id': 'storage_id1', 'args': 'args', 'interval': 10, } mock_create.return_value = None mock_boot_job.return_value = None mock_job_schedule.return_value = None mock_subprocess_api.return_value = None mgr = MetricsTaskManager() mgr.enable_sub_process = False mgr.remove_failed_job('context', 'task_id1', 'host1') self.assertEqual(mock_job_schedule.call_count, 1) mgr.enable_sub_process = True mgr.executor_map = { 'host1': { "storages": ['storage_id1'], } } mgr.scheduler = BackgroundScheduler() mgr.scheduler.start() mgr.remove_failed_job('context', 'task_id1', 'host1') self.assertEqual(mock_job_schedule.call_count, 1) self.assertEqual(mock_subprocess_api.call_count, 1) ================================================ FILE: delfin/tests/unit/test_context.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from delfin import context from delfin import test class ContextTestCase(test.TestCase): def test_request_context_elevated(self): user_context = context.RequestContext( 'fake_user', 'fake_project', is_admin=False) self.assertFalse(user_context.is_admin) self.assertEqual([], user_context.roles) admin_context = user_context.elevated() self.assertFalse(user_context.is_admin) self.assertTrue(admin_context.is_admin) self.assertNotIn('admin', user_context.roles) self.assertIn('admin', admin_context.roles) def test_request_context_read_deleted(self): ctxt = context.RequestContext('111', '222', read_deleted='yes') self.assertEqual('yes', ctxt.read_deleted) ctxt.read_deleted = 'no' self.assertEqual('no', ctxt.read_deleted) def test_request_context_read_deleted_invalid(self): self.assertRaises(ValueError, context.RequestContext, '111', '222', read_deleted=True) ctxt = context.RequestContext('111', '222') self.assertRaises(ValueError, setattr, ctxt, 'read_deleted', True) ================================================ FILE: delfin/tests/unit/test_coordination.py ================================================ # Copyright 2015 Intel # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
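The coordination tests below exercise, among other things, delfin's lock-name templating, where `{f_name}` and `str.format`-style lookups against the decorated function's arguments are interpolated into the lock name. A rough illustration of the expansion the `test_synchronized` case expects:

```python
# Illustrative only: mirrors the str.format semantics implied by the
# expected lock name 'lock-func-7-8' in test_synchronized below.
class Foo:
    val = 7

template = 'lock-{f_name}-{foo.val}-{bar[val]}'
print(template.format(f_name='func', foo=Foo(), bar={'val': 8}))
# -> lock-func-7-8
```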
import ddt from unittest import mock from tooz import coordination as tooz_coordination from tooz import locking as tooz_locking from delfin import coordination from delfin import test class Locked(Exception): pass class MockToozLock(tooz_locking.Lock): active_locks = set() def acquire(self, blocking=True): if self.name not in self.active_locks: self.active_locks.add(self.name) return True elif not blocking: return False else: raise Locked def release(self): self.active_locks.remove(self.name) @ddt.ddt class CoordinatorTestCase(test.TestCase): def setUp(self): super(CoordinatorTestCase, self).setUp() self.get_coordinator = self.mock_object(tooz_coordination, 'get_coordinator') def test_coordinator_start(self): crd = self.get_coordinator.return_value agent = coordination.Coordinator() agent.start() self.assertTrue(self.get_coordinator.called) self.assertTrue(crd.start.called) self.assertTrue(agent.started) def test_coordinator_stop(self): crd = self.get_coordinator.return_value agent = coordination.Coordinator() agent.start() self.assertIsNotNone(agent.coordinator) agent.stop() self.assertTrue(crd.stop.called) self.assertIsNone(agent.coordinator) self.assertFalse(agent.started) def test_coordinator_lock(self): crd = self.get_coordinator.return_value crd.get_lock.side_effect = lambda n: MockToozLock(n) agent1 = coordination.Coordinator() agent1.start() agent2 = coordination.Coordinator() agent2.start() lock_string = 'lock' expected_lock = lock_string.encode('ascii') self.assertNotIn(expected_lock, MockToozLock.active_locks) with agent1.get_lock(lock_string): self.assertIn(expected_lock, MockToozLock.active_locks) self.assertRaises(Locked, agent1.get_lock(lock_string).acquire) self.assertRaises(Locked, agent2.get_lock(lock_string).acquire) self.assertNotIn(expected_lock, MockToozLock.active_locks) def test_coordinator_offline(self): crd = self.get_coordinator.return_value crd.start.side_effect = tooz_coordination.ToozConnectionError('err') agent = coordination.Coordinator() self.assertRaises(tooz_coordination.ToozError, agent.start) self.assertFalse(agent.started) @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock') class CoordinationTestCase(test.TestCase): def test_lock(self, get_lock): with coordination.Lock('lock'): self.assertTrue(get_lock.called) def test_synchronized(self, get_lock): @coordination.synchronized('lock-{f_name}-{foo.val}-{bar[val]}') def func(foo, bar): pass foo = mock.Mock() foo.val = 7 bar = mock.MagicMock() bar.__getitem__.return_value = 8 func(foo, bar) get_lock.assert_called_with('lock-func-7-8') class ConsistentHashingTestCase(test.TestCase): def setUp(self): super(ConsistentHashingTestCase, self).setUp() self.get_coordinator = self.mock_object(tooz_coordination, 'get_coordinator') def test_join_group(self): crd = self.get_coordinator.return_value part = coordination.ConsistentHashing() part.start() part.join_group() self.assertTrue(crd.join_partitioned_group.called) def test_register_watcher_func(self): crd = self.get_coordinator.return_value part = coordination.ConsistentHashing() part.start() part.register_watcher_func(mock.Mock(), mock.Mock()) self.assertTrue(crd.watch_join_group.called) self.assertTrue(crd.watch_leave_group.called) def test_watch_group_change(self): crd = self.get_coordinator.return_value part = coordination.ConsistentHashing() part.start() part.watch_group_change() self.assertTrue(crd.run_watchers.called) ================================================ FILE: delfin/tests/unit/test_manager.py 
================================================ # Copyright 2020 The SODA Authors. # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test of Base Manager for Manila.""" import ddt from oslo_utils import importutils from delfin import manager from delfin import test @ddt.ddt class ManagerTestCase(test.TestCase): def setUp(self): super(ManagerTestCase, self).setUp() self.host = 'host' self.db_driver = 'fake_driver' self.mock_object(importutils, 'import_module') def test_verify_manager_instance(self): fake_manager = manager.Manager(self.host, self.db_driver) self.assertTrue(hasattr(fake_manager, '_periodic_tasks')) self.assertTrue(hasattr(fake_manager, 'additional_endpoints')) self.assertTrue(hasattr(fake_manager, 'host')) self.assertTrue(hasattr(fake_manager, 'periodic_tasks')) self.assertTrue(hasattr(fake_manager, 'init_host')) self.assertTrue(hasattr(fake_manager, 'service_version')) self.assertTrue(hasattr(fake_manager, 'service_config')) self.assertEqual(self.host, fake_manager.host) importutils.import_module.assert_called_once_with(self.db_driver) @ddt.data(True, False) def test_periodic_tasks(self, raise_on_error): fake_manager = manager.Manager(self.host, self.db_driver) fake_context = 'fake_context' self.mock_object(fake_manager, 'run_periodic_tasks') fake_manager.periodic_tasks(fake_context, raise_on_error) fake_manager.run_periodic_tasks.assert_called_once_with( fake_context, raise_on_error=raise_on_error) ================================================ FILE: delfin/tests/unit/test_rpc.py ================================================ # Copyright 2017 Red Hat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ddt
from unittest import mock

from delfin import rpc
from delfin import test


@ddt.ddt
class RPCTestCase(test.TestCase):

    @ddt.data([], ['noop'], ['noop', 'noop'])
    @mock.patch('oslo_messaging.JsonPayloadSerializer', wraps=True)
    def test_init_no_notifications(self, driver, serializer_mock):
        self.override_config('driver', driver,
                             group='oslo_messaging_notifications')
        rpc.init(test.CONF)
        self.assertEqual(rpc.utils.DO_NOTHING, rpc.NOTIFIER)
        serializer_mock.assert_not_called()

    @mock.patch.object(rpc, 'messaging')
    def test_init_notifications(self, messaging_mock):
        rpc.init(test.CONF)
        self.assertTrue(messaging_mock.JsonPayloadSerializer.called)
        self.assertTrue(messaging_mock.Notifier.called)
        self.assertEqual(rpc.NOTIFIER, messaging_mock.Notifier.return_value)

================================================
FILE: delfin/tests/unit/utils.py
================================================
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from delfin.db.sqlalchemy import models

case = unittest.TestCase()


def check_isinstance(obj, cls):
    # Fail loudly when obj is not an instance of the expected model class.
    assert isinstance(obj, cls)
    return True


def get_db_schema_attributes_list(schema):
    db_attrib_lst = []
    for i in schema.__dict__.keys():
        if not i.startswith('_'):
            db_attrib_lst.append(i)
    return sorted(db_attrib_lst)


def validate_db_schema_model(got, model):
    try:
        res = check_isinstance(got, model)
        if res:
            attributes = get_db_schema_attributes_list(model)
            lst = sorted(list(got.keys()))
            case.assertListEqual(attributes, lst)
            case.assertCountEqual(attributes, lst)
    except AssertionError:
        raise

================================================
FILE: delfin/tests/unit/wsgi/__init__.py
================================================

================================================
FILE: delfin/tests/unit/wsgi/test_common.py
================================================
# Copyright 2017 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock

from delfin import test
from delfin.wsgi import common


class FakeApp(common.Application):
    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)


class WSGICommonTestCase(test.TestCase):

    def test_application_factory(self):
        fake_global_config = mock.Mock()
        kwargs = {"k1": "v1", "k2": "v2"}

        result = FakeApp.factory(fake_global_config, **kwargs)

        fake_global_config.assert_not_called()
        self.assertIsInstance(result, FakeApp)
        for k, v in kwargs.items():
            self.assertTrue(hasattr(result, k))
            self.assertEqual(getattr(result, k), v)

    def test_application___call__(self):
        self.assertRaises(
            NotImplementedError, common.Application(),
            'fake_environ', 'fake_start_response')

================================================
FILE: delfin/utils.py
================================================
# Copyright 2020 The SODA Authors.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Utilities and helper functions."""

import functools
import inspect
import os
import pyclbr
import random
import re
import sys
import threading

from eventlet import pools
import logging
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import strutils
from oslo_utils import timeutils
import paramiko
import retrying
import six

from delfin import exception
from delfin.i18n import _

CONF = cfg.CONF
LOG = log.getLogger(__name__)
lock = threading.Lock()

if hasattr(CONF, 'debug') and CONF.debug:
    logging.getLogger("paramiko").setLevel(logging.DEBUG)

_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'

synchronized = lockutils.synchronized_with_prefix('delfin-')


def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format."""

    # Python provides a similar instance method for datetime.datetime objects
    # called isoformat(). The format of the strings generated by isoformat()
    # has a couple of problems:
    # 1) The strings generated by isotime are used in tokens and other public
    # APIs that we can't change without a deprecation period. The strings
    # generated by isoformat are not the same format, so we can't just
    # change to it.
    # 2) The strings generated by isoformat do not include the microseconds if
    # the value happens to be 0. This will likely show up as random failures
    # as parsers may be written to always expect microseconds, and it will
    # parse correctly most of the time.
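    # Illustrative comparison (values assumed): for
    # at = datetime.datetime(2021, 3, 4, 5, 6, 7, 0):
    #   at.isoformat()               -> '2021-03-04T05:06:07' (no microseconds)
    #   isotime(at)                  -> '2021-03-04T05:06:07Z'
    #   isotime(at, subsecond=True)  -> '2021-03-04T05:06:07.000000Z'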
    if not at:
        at = timeutils.utcnow()
    st = at.strftime(_ISO8601_TIME_FORMAT
                     if not subsecond
                     else _ISO8601_TIME_FORMAT_SUBSECOND)
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    # Need to handle either iso8601 or python UTC format
    st += ('Z' if tz in ['UTC', 'UTC+00:00'] else tz)
    return st


def _get_root_helper():
    return 'sudo delfin-rootwrap %s' % CONF.rootwrap_config


def execute(*cmd, **kwargs):
    """Convenience wrapper around oslo's execute() function."""
    if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
        kwargs['root_helper'] = _get_root_helper()
    if hasattr(CONF, 'debug') and CONF.debug:
        kwargs['loglevel'] = logging.DEBUG
    return processutils.execute(*cmd, **kwargs)


class SSHPool(pools.Pool):
    """A simple eventlet pool to hold ssh connections."""

    def __init__(self, ip, port, conn_timeout, login, password=None,
                 privatekey=None, *args, **kwargs):
        self.ip = ip
        self.port = port
        self.login = login
        self.password = password
        self.conn_timeout = conn_timeout if conn_timeout else None
        self.path_to_private_key = privatekey
        super(SSHPool, self).__init__(*args, **kwargs)

    def create(self):  # pylint: disable=method-hidden
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        look_for_keys = True
        if self.path_to_private_key:
            self.path_to_private_key = os.path.expanduser(
                self.path_to_private_key)
            look_for_keys = False
        elif self.password:
            look_for_keys = False
        try:
            ssh.connect(self.ip,
                        port=self.port,
                        username=self.login,
                        password=self.password,
                        key_filename=self.path_to_private_key,
                        look_for_keys=look_for_keys,
                        timeout=self.conn_timeout,
                        banner_timeout=self.conn_timeout)
            if self.conn_timeout:
                transport = ssh.get_transport()
                transport.set_keepalive(self.conn_timeout)
            return ssh
        except Exception as e:
            msg = _("Check whether private key or password are correctly "
                    "set. Error connecting via ssh: %s") % e
            LOG.error(msg)
            raise exception.SSHException(msg)

    def get(self):
        """Return an item from the pool, when one is available.

        This may cause the calling greenthread to block. Check if a
        connection is active before returning it. For dead connections
        create and return a new connection.
        """
        if self.free_items:
            conn = self.free_items.popleft()
            if conn:
                if conn.get_transport().is_active():
                    return conn
                else:
                    conn.close()
            return self.create()
        if self.current_size < self.max_size:
            created = self.create()
            self.current_size += 1
            return created
        return self.channel.get()

    def remove(self, ssh):
        """Close an ssh client and remove it from free_items."""
        ssh.close()
        if ssh in self.free_items:
            self.free_items.remove(ssh)
        if self.current_size > 0:
            self.current_size -= 1


def check_ssh_injection(cmd_list):
    ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
                             '<']

    # Check whether injection attacks exist
    for arg in cmd_list:
        arg = arg.strip()

        # Check for matching quotes on the ends
        is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$',
                             arg)
        if is_quoted:
            # Check for unescaped quotes within the quoted argument
            quoted = is_quoted.group('quoted')
            if quoted:
                if (re.match('[\'"]', quoted)
                        or re.search('[^\\\\][\'"]', quoted)):
                    raise exception.SSHInjectionThreat(cmd_list)
        else:
            # We only allow spaces within quoted arguments, and that
            # is the only special character allowed within quotes
            if len(arg.split()) > 1:
                raise exception.SSHInjectionThreat(cmd_list)

        # Second, check whether a dangerous character is in the command.
        # A shell special operator must be a single argument.
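        # Illustrative inputs (assumed, not from the original source):
        # ['cat', 'f', '&&', 'reboot'] and ['ls', '/tmp; rm -rf /'] both
        # raise SSHInjectionThreat, while ['ls', '-l', '/tmp'] passes.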
for c in ssh_injection_pattern: if c not in arg: continue result = arg.find(c) if not result == -1: if result == 0 or not arg[result - 1] == '\\': raise exception.SSHInjectionThreat(cmd_list) def monkey_patch(): """Patch decorator. If the Flags.monkey_patch set as True, this function patches a decorator for all functions in specified modules. You can set decorators for each modules using CONF.monkey_patch_modules. The format is "Module path:Decorator function". Example: 'delfin.api.ec2.cloud:' \ delfin.common.common.notifier.api.notify_decorator' Parameters of the decorator is as follows. (See delfin.common.common.notifier.api.notify_decorator) name - name of the function function - object of the function """ # If CONF.monkey_patch is not True, this function do nothing. if not CONF.monkey_patch: return # Get list of modules and decorators for module_and_decorator in CONF.monkey_patch_modules: module, decorator_name = module_and_decorator.split(':') # Get decorator function decorator = importutils.import_class(decorator_name) __import__(module) # Retrieve module information using pyclbr module_data = pyclbr.readmodule_ex(module) for key in module_data.keys(): # set the decorator for the class methods if isinstance(module_data[key], pyclbr.Class): clz = importutils.import_class("%s.%s" % (module, key)) # NOTE(vponomaryov): we need to distinguish class methods types # for py2 and py3, because the concept of 'unbound methods' has # been removed from the python3.x if six.PY3: member_type = inspect.isfunction else: member_type = inspect.ismethod for method, func in inspect.getmembers(clz, member_type): setattr( clz, method, decorator("%s.%s.%s" % (module, key, method), func)) # set the decorator for the function if isinstance(module_data[key], pyclbr.Function): func = importutils.import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key, decorator("%s.%s" % (module, key), func)) def file_open(*args, **kwargs): """Open file see built-in open() documentation for more details Note: The reason this is kept in a separate module is to easily be able to provide a stub module that doesn't alter system state at all (for unit tests) """ return open(*args, **kwargs) def check_string_length(value, name, min_length=0, max_length=None, allow_all_spaces=True): """Check the length of specified string. :param value: the value of the string :param name: the name of the string :param min_length: the min_length of the string :param max_length: the max_length of the string """ try: strutils.check_string_length(value, name=name, min_length=min_length, max_length=max_length) except (ValueError, TypeError) as exc: raise exception.InvalidInput(exc) if not allow_all_spaces and value.isspace(): msg = _('%(name)s cannot be all spaces.') % name raise exception.InvalidInput(msg) def service_is_up(service): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. 
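    # Illustrative (threshold value assumed): with CONF.service_down_time
    # = 60, a service whose last heartbeat is 45 seconds old is reported
    # up, while one last seen 90 seconds ago is reported down.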
tdelta = timeutils.utcnow() - last_heartbeat elapsed = tdelta.total_seconds() return abs(elapsed) <= CONF.service_down_time def walk_class_hierarchy(clazz, encountered=None): """Walk class hierarchy, yielding most derived classes first.""" if not encountered: encountered = [] for subclass in clazz.__subclasses__(): if subclass not in encountered: encountered.append(subclass) # drill down to leaves first for subsubclass in walk_class_hierarchy(subclass, encountered): yield subsubclass yield subclass def is_valid_ip_address(ip_address, ip_version): ip_version = ([int(ip_version)] if not isinstance(ip_version, list) else ip_version) if not set(ip_version).issubset(set([4, 6])): raise exception.ImproperIPVersion(ip_version) if 4 in ip_version: if netutils.is_valid_ipv4(ip_address): return True if 6 in ip_version: if netutils.is_valid_ipv6(ip_address): return True return False def is_all_tenants(search_opts): """Checks to see if the all_tenants flag is in search_opts :param dict search_opts: The search options for a request :returns: boolean indicating if all_tenants are being requested or not """ all_tenants = search_opts.get('all_tenants') if all_tenants: try: all_tenants = strutils.bool_from_string(all_tenants, True) except ValueError as err: raise exception.InvalidInput(six.text_type(err)) else: # The empty string is considered enabling all_tenants all_tenants = 'all_tenants' in search_opts return all_tenants class IsAMatcher(object): def __init__(self, expected_value=None): self.expected_value = expected_value def __eq__(self, actual_value): return isinstance(actual_value, self.expected_value) class ComparableMixin(object): def _compare(self, other, method): try: return method(self._cmpkey(), other._cmpkey()) except (AttributeError, TypeError): # _cmpkey not implemented, or return different type, # so I can't compare with "other". return NotImplemented def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def retry(exception, interval=1, retries=10, backoff_rate=2, wait_random=False, backoff_sleep_max=None): """A wrapper around retrying library. This decorator allows to log and to check 'retries' input param. Time interval between retries is calculated in the following way: interval * backoff_rate ^ previous_attempt_number :param exception: expected exception type. When wrapped function raises an exception of this type, the function execution is retried. :param interval: param 'interval' is used to calculate time interval between retries: interval * backoff_rate ^ previous_attempt_number :param retries: number of retries. Use 0 for an infinite retry loop. :param backoff_rate: param 'backoff_rate' is used to calculate time interval between retries: interval * backoff_rate ^ previous_attempt_number :param wait_random: boolean value to enable retry with random wait timer. :param backoff_sleep_max: Maximum number of seconds for the calculated backoff sleep. Use None if no maximum is needed. 
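    Usage sketch (illustrative):

        @retry(TimeoutError, interval=2, retries=3, backoff_rate=2)
        def query_array():
            ...

    Retries are spaced roughly interval * backoff_rate ** n seconds apart,
    where n is the previous attempt number.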
""" def _retry_on_exception(e): return isinstance(e, exception) def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms): exp = backoff_rate ** previous_attempt_number wait_for = max(0, interval * exp) if wait_random: wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0) else: wait_val = wait_for * 1000.0 if backoff_sleep_max: wait_val = min(backoff_sleep_max * 1000.0, wait_val) LOG.debug("Sleeping for %s seconds.", (wait_val / 1000.0)) return wait_val def _print_stop(previous_attempt_number, delay_since_first_attempt_ms): delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0 LOG.debug("Failed attempt %s", previous_attempt_number) LOG.debug("Have been at this for %s seconds", delay_since_first_attempt) return retries > 0 and previous_attempt_number == retries if retries < 0: raise ValueError(_('Retries must be greater than or ' 'equal to 0 (received: %s).') % retries) def _decorator(f): @six.wraps(f) def _wrapper(*args, **kwargs): r = retrying.Retrying(retry_on_exception=_retry_on_exception, wait_func=_backoff_sleep, stop_func=_print_stop) return r.call(f, *args, **kwargs) return _wrapper return _decorator def get_bool_from_api_params(key, params, default=False, strict=True): """Parse bool value from request params. HTTPBadRequest will be directly raised either of the cases below: 1. invalid bool string was found by key(with strict on). 2. key not found while default value is invalid(with strict on). """ param = params.get(key, default) try: param = strutils.bool_from_string(param, strict=strict, default=default) except ValueError: msg = _('Invalid value %(param)s for %(param_string)s. ' 'Expecting a boolean.') % {'param': param, 'param_string': key} raise exception.InvalidInput(msg) return param def check_params_exist(keys, params): """Validates if keys exist in params. :param keys: List of keys to check :param params: Parameters received from REST API """ if any(set(keys) - set(params)): msg = _("Must specify all mandatory parameters: %s") % keys raise exception.InvalidInput(msg) def check_params_are_boolean(keys, params, default=False): """Validates if keys in params are boolean. :param keys: List of keys to check :param params: Parameters received from REST API :param default: default value when it does not exist :return: a dictionary with keys and respective retrieved value """ result = {} for key in keys: value = get_bool_from_api_params(key, params, default, strict=True) result[key] = value return result def convert_str(text): """Convert to native string. Convert bytes and Unicode strings to native strings: * convert to bytes on Python 2: encode Unicode using encodeutils.safe_encode() * convert to Unicode on Python 3: decode bytes from UTF-8 """ if six.PY2: return encodeutils.safe_encode(text) else: if isinstance(text, bytes): return text.decode('utf-8') else: return text class DoNothing(str): """Class that literrally does nothing. We inherit from str in case it's called with json.dumps. 
""" def __call__(self, *args, **kwargs): return self def __getattr__(self, name): return self DO_NOTHING = DoNothing() def notifications_enabled(conf): """Check if oslo notifications are enabled.""" notifications_driver = set(conf.oslo_messaging_notifications.driver) return notifications_driver and notifications_driver != {'noop'} def if_notifications_enabled(function): """Calls decorated method only if notifications are enabled.""" @functools.wraps(function) def wrapped(*args, **kwargs): if notifications_enabled(CONF): return function(*args, **kwargs) return DO_NOTHING return wrapped def write_local_file(filename, contents, as_root=False): tmp_filename = "%s.tmp" % filename if as_root: execute('tee', tmp_filename, run_as_root=True, process_input=contents) execute('mv', '-f', tmp_filename, filename, run_as_root=True) else: with open(tmp_filename, 'w') as f: f.write(contents) os.rename(tmp_filename, filename) def write_remote_file(ssh, filename, contents, as_root=False): tmp_filename = "%s.tmp" % filename if as_root: cmd = 'sudo tee "%s" > /dev/null' % tmp_filename cmd2 = 'sudo mv -f "%s" "%s"' % (tmp_filename, filename) else: cmd = 'cat > "%s"' % tmp_filename cmd2 = 'mv -f "%s" "%s"' % (tmp_filename, filename) stdin, __, __ = ssh.exec_command(cmd) stdin.write(contents) stdin.close() stdin.channel.shutdown_write() ssh.exec_command(cmd2) class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: with lock: if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] def utcnow_ms(): return int(timeutils.utcnow(True).timestamp() * 1000) ================================================ FILE: delfin/version.py ================================================ # Copyright 2020 The SODA Authors. # Copyright 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pbr import version as pbr_version DELFIN_VENDOR = "SODA Foundation" DELFIN_PRODUCT = "SODA Delfin" DELFIN_PACKAGE = None # OS distro package version suffix loaded = False version_info = pbr_version.VersionInfo('delfin') version_string = version_info.version_string ================================================ FILE: delfin/wsgi/__init__.py ================================================ ================================================ FILE: delfin/wsgi/common.py ================================================ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Utility methods for working with WSGI servers.""" import webob.dec import webob.exc from delfin.i18n import _ class Request(webob.Request): pass class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [app:wadl] latest_version = 1.3 paste.app_factory = delfin.api.fancy_api:Wadl.factory which would result in a call to the `Wadl` class as import delfin.api.fancy_api fancy_api.Wadl(latest_version='1.3') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ return cls(**local_config) def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # Any of the following objects work as responses: # Option 1: simple string res = 'message\n' # Option 2: a nicely formatted HTTP exception page res = exc.HTTPForbidden(detail='Nice try') # Option 3: a webob Response object (in case you need to play with # headers, or you want to be treated like an iterable, or or or) res = Response(); res.app_iter = open('somefile') # Option 4: any wsgi app to be run next res = self.application # Option 5: you can get a Response object for a wsgi app, too, to # play with headers etc res = req.get_response(self.application) # You can then just return your response... return res # ... or set req.response and return None. req.response = res See the end of http://pythonpaste.org/webob/modules/dec.html for more info. """ raise NotImplementedError(_('You must implement __call__')) class Middleware(Application): """Base WSGI middleware. These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [filter:analytics] redis_host = 127.0.0.1 paste.filter_factory = delfin.api.analytics:Analytics.factory which would result in a call to the `Analytics` class as import delfin.api.analytics analytics.Analytics(app_from_paste, redis_host='127.0.0.1') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ def _factory(app): return cls(app, **local_config) return _factory def __init__(self, application): self.application = application def process_request(self, req): """Called on each request. If this returns None, the next application down the stack will be executed. If it returns a response then that response will be returned and execution will stop here. 
""" return None def process_response(self, response): """Do whatever you'd like to the response.""" return response @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # pylint: disable=assignment-from-none response = self.process_request(req) if response: return response response = req.get_response(self.application) return self.process_response(response) ================================================ FILE: docker-compose.yml ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Installation Steps: # ------------------ # # 1. Create the 'sodafoundation/delfin' docker image with Dockerfile in # delfin project using command below. (Note: In future we will upload # this image to docker-hub and it can be downloaded with out this step) # # $ docker build -t sodafoundation/delfin . # # 2. Export (optional) environment vars for hostnames for redis, rabbitmq and credentials for rabbitmq. Eg. # Here is example for exporting variable and its current values in setup. # # $ export DELFIN_RABBITMQ_USER=delfinuser # $ export DELFIN_RABBITMQ_PASS=delfinpass # $ export DELFIN_RABBITMQ_HOSTNAME=rabbitmq # $ export DELFIN_REDIS_HOSTNAME=redis # $ export DELFIN_METRICS_DIR=/var/lib/delfin/metrics # # 3. Bring up delfin project using following command # # $ docker-compose up -d # # 4. When finished using delfin project, bring down containers using following command # # $ docker-compose down # # 5. 
To bring up delfin project with multiple service instances # # $ docker-compose up -d --scale <>=<> # # example: Deploy delfin with 3 delfin-task and 2 delfin-alert instances # $ docker-compose up -d --scale delfin-task=3 --scale delfin-alert=2 # Note: Multiple instances of delfin-api are not allowed version: '3.3' services: redis: image: redis container_name: ${DELFIN_REDIS_HOSTNAME:-redis} command: redis-server ports: - ${DELFIN_REDIS_PORT:-6379}:6379 restart: always rabbitmq: image: rabbitmq:3-management container_name: ${DELFIN_RABBITMQ_HOSTNAME:-rabbitmq} environment: RABBITMQ_DEFAULT_USER: ${DELFIN_RABBITMQ_USER:-delfinuser} RABBITMQ_DEFAULT_PASS: ${DELFIN_RABBITMQ_PASS:-delfinpass} RABBITMQ_DEFAULT_VHOST: "/" ports: - 5672:5672 - 15672:15672 restart: always delfin-api: image: sodafoundation/delfin command: "api" volumes: - ./etc/delfin:/etc/delfin - db_data:/var/lib/delfin ports: - 8190:8190 restart: always environment: - OS_COORDINATION__BACKEND_SERVER=${DELFIN_REDIS_HOSTNAME:-redis}:6379 - OS_DEFAULT__TRANSPORT_URL=rabbit://${DELFIN_RABBITMQ_USER:-delfinuser}:${DELFIN_RABBITMQ_PASS:-delfinpass}@${DELFIN_RABBITMQ_HOSTNAME:-rabbitmq}:5672// depends_on: - redis - rabbitmq delfin-task: image: sodafoundation/delfin command: "task" volumes: - ./etc/delfin:/etc/delfin - db_data:/var/lib/delfin - metrics_dir:${DELFIN_METRICS_DIR:-/var/lib/delfin/metrics} restart: always environment: - OS_COORDINATION__BACKEND_SERVER=${DELFIN_REDIS_HOSTNAME:-redis}:6379 - OS_DEFAULT__TRANSPORT_URL=rabbit://${DELFIN_RABBITMQ_USER:-delfinuser}:${DELFIN_RABBITMQ_PASS:-delfinpass}@${DELFIN_RABBITMQ_HOSTNAME:-rabbitmq}:5672// - OS_PROMETHEUS_EXPORTER__METRICS_DIR=${DELFIN_METRICS_DIR:-/var/lib/delfin/metrics} depends_on: - redis - rabbitmq delfin-alert: image: sodafoundation/delfin command: "alert" volumes: - ./etc/delfin:/etc/delfin - db_data:/var/lib/delfin restart: always environment: - OS_COORDINATION__BACKEND_SERVER=${DELFIN_REDIS_HOSTNAME:-redis}:6379 - OS_DEFAULT__TRANSPORT_URL=rabbit://${DELFIN_RABBITMQ_USER:-delfinuser}:${DELFIN_RABBITMQ_PASS:-delfinpass}@${DELFIN_RABBITMQ_HOSTNAME:-rabbitmq}:5672// depends_on: - redis - rabbitmq delfin-exporter: image: sodafoundation/delfin command: "exporter" volumes: - ./etc/delfin:/etc/delfin - metrics_dir:${DELFIN_METRICS_DIR:-/var/lib/delfin/metrics} ports: - 8195:8195 restart: always environment: - OS_DEFAULT__TRANSPORT_URL=rabbit://${DELFIN_RABBITMQ_USER:-delfinuser}:${DELFIN_RABBITMQ_PASS:-delfinpass}@${DELFIN_RABBITMQ_HOSTNAME:-rabbitmq}:5672// - OS_PROMETHEUS_EXPORTER__METRICS_DIR=${DELFIN_METRICS_DIR:-/var/lib/delfin/metrics} depends_on: - rabbitmq volumes: db_data: {} metrics_dir: {} ================================================ FILE: etc/delfin/api-paste.ini ================================================ ############# # Delfin # ############# [composite:delfin] use = call:delfin.api:root_app_factory /v1: delfin_api_v1 [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory [pipeline:delfin_api_v1] pipeline = cors http_proxy_to_wsgi context_wrapper delfin_api_v1app [app:delfin_api_v1app] paste.app_factory = delfin.api.v1.router:APIRouter.factory [filter:context_wrapper] paste.filter_factory = delfin.api.middlewares:ContextWrapper.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = delfin ================================================ FILE: etc/delfin/delfin.conf ================================================ [DEFAULT] api_paste_config = 
/etc/delfin/api-paste.ini
delfin_cryptor = delfin.cryptor._Base64
api_max_limit = 1000

# Uncomment or add exporters
# performance_exporters = PerformanceExporterPrometheus, PerformanceExporterKafka
# alert_exporters = AlertExporterPrometheus

[database]
connection = sqlite:////var/lib/delfin/delfin.sqlite
db_backend = sqlalchemy

[TELEMETRY]
performance_collection_interval = 900

[KAFKA_EXPORTER]
kafka_topic_name = "delfin-kafka"
kafka_ip = 'localhost'
kafka_port = '9092'

[PROMETHEUS_EXPORTER]
metric_server_ip = 0.0.0.0
metric_server_port = 8195
metrics_cache_file = /var/lib/delfin/delfin_exporter.txt

[PROMETHEUS_ALERT_MANAGER_EXPORTER]
alert_manager_host = 'localhost'
alert_manager_port = '9093'

================================================
FILE: installer/README.md
================================================
# Delfin Installation Guide

SODA Delfin supports two types of installation:

* Installation using Ansible
* Installation using Bash scripts

## Installation using Ansible

* Supported OS: **Ubuntu 20.04, Ubuntu 18.04**
* Prerequisite: **Python 3.6 or above** should be installed

### Install steps

Ensure that ansible and docker are either not installed, or are installed at the versions listed below or later. If ansible and docker are not installed on the OS, the `install_dependencies.sh` script will install them.

```bash
sudo apt-get update && sudo apt-get install -y git
git clone https://github.com/sodafoundation/delfin.git
# git checkout
cd delfin/installer
chmod +x install_dependencies.sh && source install_dependencies.sh
cd ansible
export PATH=$PATH:/home/$USER/.local/bin
sudo -E env "PATH=$PATH" ansible-playbook site.yml -i local.hosts -v
```

**NOTE:** *Tool versions used for verification of Delfin under Ubuntu 20.04*

* ansible version: 5.10.0
* docker version: 20.10.21
* docker compose version: 2.12.2

### Uninstall

```bash
sudo -E env "PATH=$PATH" ansible-playbook clean.yml -i local.hosts -v
```

### Logs

Delfin process execution logs can be found in the /tmp/ folder:

* /tmp/api.log
* /tmp/alert.log
* /tmp/task.log
* /tmp/exporter.log
* /tmp/create_db.log

### How to use Delfin

Delfin can be used either through the dashboard or the REST APIs. Please refer to the [user guides](https://docs.sodafoundation.io/guides/user-guides/delfin/dashboard/).

## Installation using Bash Scripts

This is a standalone/non-containerized installer for the SODA Infrastructure Manager (delfin) project. It contains a script with options to check that the environment is suitable for installing delfin, and it installs the required dependent software/binaries.

* Supported OS: **Ubuntu 20.04, Ubuntu 18.04**
* Prerequisite:
  * **Python 3.6 or above** should be installed
  * Ensure the logged-in user has **root privileges**.

#### Installation steps

```bash
sudo -i
apt-get install python3 python3-pip
git clone https://github.com/sodafoundation/delfin.git && git checkout
cd delfin
export PYTHONPATH=$(pwd)
./installer/install
```

Refer below for installer options.

#### Uninstall

```bash
./installer/uninstall
```

#### [Optional] Set up Prometheus (to monitor performance metrics through prometheus)

Follow the steps below to set up delfin with prometheus. Once your setup is ready, you can register the storage devices for performance monitoring. Later, the performance metrics can be viewed on the prometheus server. This example also guides you to configure and update the targets and interval for scraping the metrics.
Alternatively, you can also watch this [video](https://drive.google.com/file/d/1WMmLXQeNlToZd0DP5hCFtDZ1IbNJpO6B/view?usp=drivesdk) for more details.

[Download the latest binaries from here](https://prometheus.io/download/) and run the steps below.

1. tar xvfz prometheus-*.tar.gz
2. cd prometheus-*
3. Edit prometheus.yml and set the appropriate target, interval, and metrics path. Below is a sample prometheus.yml:

###### prometheus.yml
```
global:
  scrape_interval: 10s

scrape_configs:
  - job_name: delfin-prometheus
    metrics_path: /metrics
    static_configs:
      - targets:
          - 'localhost:8195'
```

4. ./prometheus

Example:
```sh
root@root:/prometheus/prometheus-2.20.0.linux-amd64$ ./prometheus
```

### Structure of the installer

This installer comes with options for pre-check, install and uninstall.

Pre-check: Checks for the components required by delfin to function. If they are not present, pre-check will install them.

Install: Installs and starts the delfin processes.

Uninstall: Uninstalls delfin. It does not uninstall the required components; you may need to uninstall them explicitly using the native approach.

### How to install

To get help, execute 'install -h'. It will show the usage information.

The install script can be executed with three different switches to:

- either do a pre-check [./install -p]
- only run the installer without doing a pre-check (if the pre-check has been executed explicitly) [./install -s]
- execute the pre-check as well as the install [./install]

#### For the available options for install, you can execute 'install -h'

```sh
installer/install -h

# Example
root@root1:~/delfin-demo/delfin$ installer/install -h
Usage install [--help|--precheck|--skip_precheck]

Usage:
    install [-h|--help]
    install [-p|--precheck]
    install [-s|--skip_precheck]

Flags:
    -h, --help Print the usage of install
    -p, --precheck Only perform system software requirements for installation
    -s, --skip_precheck If precheck is not required and directly install
```

#### For pre-check, run the command below

```sh
installer/install -p

# Example
root@root1:~/delfin-demo/delfin$ installer/install -p
OR
root@root1:~/delfin-demo/delfin/installer$ ./install --precheck
```

#### Install without pre-check

```sh
installer/install -s

# Example
root@root1:~/delfin-demo/delfin$ installer/install -s
```

#### Execute install with pre-check

```sh
installer/install

# Example
root@root1:~/delfin-demo/delfin$ installer/install
```

#### Configure multiple instances of delfin components

Before executing the install command, set the respective environment variable for each delfin component that should run multiple instances:

```sh
$ export DELFIN_<>_INSTANCES=<>
$ installer/install

# Example: Deploy delfin with 3 task and 2 alert instances
$ export DELFIN_TASK_INSTANCES=3
$ export DELFIN_ALERT_INSTANCES=2
$ installer/install
```

Note: Multiple instances of the exporter and api components are currently not allowed.

### Logs

All the installer logs are stored in the /var/log/soda directory. The logs can be uniquely identified based on the timestamp.

## Test the running delfin setup/process

1. Make sure all delfin processes are up and running.

```
ps -ef|grep delfin

# Example
root@root1:~/delfin-demo/delfin# ps -ef |grep delfin
root 25856 3570 0 00:21 pts/0 00:00:04 python3 /root/delfin-demo/delfin/installer/../delfin/cmd/api.py --config-file /etc/delfin/delfin.conf
root 25858 3570 0 00:21 pts/0 00:00:09 python3 /root/delfin-demo/delfin/installer/../delfin/cmd/task.py --config-file /etc/delfin/delfin.conf
root 25860 3570 0 00:21 pts/0 00:00:06 python3 /root/delfin-demo/delfin/installer/../delfin/cmd/alert.py --config-file /etc/delfin/delfin.conf
root 25862 3570 0 00:21 pts/0 00:00:00 python3 /root/delfin-demo/delfin/installer/../delfin/exporter/exporter_server.py --config-file /etc/delfin/delfin.conf
```

2. Register storages.

POST http://localhost:8190/v1/storages

body:
```
{
    "vendor": "fake_storage",
    "model": "fake_driver",
    "rest": {
        "host": "127.0.0.1",
        "port": 8088,
        "username": "admin",
        "password": "pass"
    },
    "extra_attributes": {
        "array_id": "12345"
    }
}
```

3. Run the GET API to get the registered storages.

GET http://localhost:8190/v1/storages

Use the returned storage_id when registering a storage for performance collection or alert monitoring.

4. [Optional] If Prometheus is configured, monitor the performance metrics on the Prometheus server at the default location http://localhost:9090/graph.

## Limitation

Local installation, unlike the Ansible installer, does not support SODA Dashboard integration.

================================================
FILE: installer/ansible/clean.yml
================================================

# Copyright 2022 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
# Defines the clean-up processes when removing the nodes.
- name: Cleanup delfin installation
  hosts:
    - delfin-nodes
  remote_user: root
  vars_files:
    - group_vars/delfin.yml
  gather_facts: false
  become: True
  tasks:
    - import_role:
        name: cleaner

================================================
FILE: installer/ansible/group_vars/delfin.yml
================================================

# Copyright 2022 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
# Dummy variable to avoid an error, because ansible does not recognize the
# file as a valid configuration file when there is no variable in it.
dummy:

###########
# GENERAL #
###########

# This field indicates the local machine host ip
host_ip: 127.0.0.1

# delfin installation types are: 'repository', 'release' and 'container'
delfin_installation_type: repository

# The fields below specify the tag based on the installation type
delfin_branch: master

# Delfin projects release versions
delfin_release_version: v1.9.0

# delete all source packages
source_purge: true

# delete database
database_purge: true

# URLs, Environment Variables, IP addresses and Ports list
soda_delfin_url: "http://{{ host_ip }}:8190"

# These fields are NOT suggested to be modified
delfin_work_dir: /opt/delfin-linux-amd64
delfin_config_dir: /etc/delfin
venv: "{{ delfin_work_dir }}/venv"
delfin_redis_ip: 127.0.0.1
delfin_redis_port: 6379
delfin_rabbitmq_user: delfinuser
delfin_rabbitmq_pass: delfinpass

# Configurable Perf collection interval in seconds
performance_collection_interval: 900

# Enable dynamic subprocess optimization for Perf collection
enable_dynamic_subprocess: false

# Exporter configurations for Kafka, Prometheus & Alert Manager
# Uncomment exporters to enable
performance_exporters: #PerformanceExporterPrometheus, PerformanceExporterKafka
alert_exporters: #AlertExporterPrometheus

# Exporter configurations for Kafka
delfin_exporter_kafka_ip: 'localhost'
delfin_exporter_kafka_port: 9092
delfin_exporter_kafka_topic: 'delfin-kafka'

# Exporter configurations for Prometheus
delfin_exporter_prometheus_ip: 0.0.0.0
delfin_exporter_prometheus_port: 8195
delfin_exporter_prometheus_metrics_dir: '/var/lib/delfin/metrics'

# Exporter configurations for Alert Manager
delfin_exporter_alertmanager_host: 'localhost'
delfin_exporter_alertmanager_port: 9093

##############
# REPOSITORY #
##############

# If the user specifies installing from repository, then they can choose the
# specific repository branch
delfin_repo_branch: "{{ delfin_branch }}"

# These fields are NOT suggested to be modified
delfin_remote_url: https://github.com/sodafoundation/delfin.git

###########
# RELEASE #
###########

# If the user specifies installing from release, then they can choose the specific version
delfin_release: "{{ delfin_release_version }}"

# These fields are NOT suggested to be modified
delfin_download_url: https://github.com/sodafoundation/delfin/archive/{{ delfin_release }}.tar.gz
delfin_tarball_dir: /tmp/sodafoundation-delfin-{{ delfin_release }}-linux-amd64

================================================
FILE: installer/ansible/local.hosts
================================================

# Copyright 2022 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[delfin-nodes]
localhost ansible_connection=local

================================================
FILE: installer/ansible/roles/cleaner/scenarios/delfin.yml
================================================

# Copyright 2022 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. --- - name: Stop delfin containers, if started shell: "{{ item }}" with_items: - docker compose down become: yes ignore_errors: yes args: chdir: "{{ delfin_work_dir }}" - name: Get running delfin processes shell: "ps -ef | grep -v grep | grep -i 'python3 /opt/delfin-linux-amd64/delfin/' | awk '{print $2}'" register: running_processes - name: Kill running delfin processes shell: "kill {{ item }}" with_items: "{{ running_processes.stdout_lines }}" ignore_errors: yes - wait_for: path: "/proc/{{ item }}/status" state: absent with_items: "{{ running_processes.stdout_lines }}" ignore_errors: yes register: killed_processes - name: Force kill stuck processes shell: "kill -9 {{ item }}" with_items: "{{ killed_processes.results | select('failed') | map(attribute='item') | list }}" - name: Stop service of delfin, if started service: name: "{{ item }}" state: stopped with_items: - rabbitmq-server - redis-server become: yes ignore_errors: yes - name: clean up all delfin directories file: path: "{{ item }}" state: absent force: yes with_items: - "{{ delfin_work_dir }}" - "{{ delfin_tarball_dir }}" ignore_errors: yes tags: - delfin - clean when: - source_purge is undefined or source_purge != false ================================================ FILE: installer/ansible/roles/cleaner/scenarios/release.yml ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. --- - name: clean up all release files if installed from release file: path: "{{ item }}" state: absent force: yes with_items: - "{{ delfin_tarball_dir }}" ignore_errors: yes tags: clean ================================================ FILE: installer/ansible/roles/cleaner/tasks/main.yml ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- name: include scenarios/release.yml if installed from release include_tasks: scenarios/release.yml when: delfin_installation_type == "release" - name: include scenarios/delfin.yml for cleaning up delfin service include_tasks: scenarios/delfin.yml tags: delfin - name: clean all configuration and log files file: path: "{{ item }}" state: absent force: yes with_items: - "{{ delfin_config_dir }}" ignore_errors: yes ================================================ FILE: installer/ansible/roles/delfin-installer/scenarios/container.yml ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. --- - name: build and install delfin using containerized deployment shell: "{{ item }}" with_items: - docker build -t sodafoundation/delfin . - DELFIN_REDIS_HOSTNAME=delfin_redis DELFIN_REDIS_PORT={{ delfin_redis_port }} DELFIN_METRICS_DIR={{ delfin_exporter_prometheus_metrics_dir }} DELFIN_HOST_IP={{ host_ip }} DELFIN_RABBITMQ_USER={{ delfin_rabbitmq_user }} DELFIN_RABBITMQ_PASS={{ delfin_rabbitmq_pass }} docker compose up -d become: yes args: chdir: "{{ delfin_work_dir }}" ================================================ FILE: installer/ansible/roles/delfin-installer/scenarios/rabbitmq.yml ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
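# Note: the key-import, repository and install tasks below are all guarded by
# a stat of /etc/init.d/rabbitmq-server, so they only run when RabbitMQ is not
# already installed; the service is then started unconditionally at the end.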
--- - name: Check if RabbitMQ Service Exists stat: path=/etc/init.d/rabbitmq-server register: rabbitmqservice - name: Remove useless packages from the cache apt: autoclean: yes - name: Import RabbitMQ public key apt_key: url: https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc state: present become: yes when: - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false - name: Add Launchpad Erlang PPA key apt_key: keyserver: keyserver.ubuntu.com id: F77F1EDA57EBB1CC become: yes when: - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false - name: Add PackageCloud RabbitMQ repository apt_key: url: https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey state: present become: yes when: - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false - name: Add RabbitMQ Erlang official repo apt_repository: repo: deb http://ppa.launchpad.net/rabbitmq/rabbitmq-erlang/ubuntu {{ ansible_distribution_release }} main state: present filename: rabbitmq become: yes when: - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false - name: Add RabbitMQ Server official repo apt_repository: repo: deb https://packagecloud.io/rabbitmq/rabbitmq-server/ubuntu/ {{ ansible_distribution_release }} main state: present filename: rabbitmq become: yes when: - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false - name: Import Erlang public key apt_key: url: https://packages.erlang-solutions.com/debian/erlang_solutions.asc state: present become: yes when: - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false - name: Add Erlang official repo apt_repository: repo: deb https://binaries.erlang-solutions.com/debian {{ ansible_distribution_release }} contrib state: present filename: erlang become: yes when: - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false - name: Install RabbitMQ package apt: name: rabbitmq-server update_cache: yes install_recommends: yes allow_unauthenticated: yes state: present become: yes when: - rabbitmqservice.stat.exists is undefined or rabbitmqservice.stat.exists == false - name: Start the RabbitMQ server service: name: rabbitmq-server state: started become: yes ================================================ FILE: installer/ansible/roles/delfin-installer/scenarios/redis.yml ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
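# Note: redis-server is installed only when /etc/init.d/redis-server is
# absent; the default port in /etc/redis/redis.conf is then replaced with
# delfin_redis_port and the service is restarted to pick up the change.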
--- - name: Check if Redis Service Exists stat: path=/etc/init.d/redis-server register: redisservice - name: Ensure Redis is present shell: "{{ item }}" with_items: - ulimit -n 65536 - apt-get install -y redis-server when: - redisservice.stat.exists is undefined or redisservice.stat.exists == false - name: Change the redis default port replace: path: /etc/redis/redis.conf regexp: "port 6379" replace: "port {{ delfin_redis_port }}" become: yes - name: Ensure Redis is restarted service: name: redis-server state: restarted become: yes ================================================ FILE: installer/ansible/roles/delfin-installer/scenarios/source-code.yml ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. --- # Install and start delfin - name: Check for delfin source code existed stat: path: "{{ delfin_work_dir }}/setup.py" register: delfinexisted - name: Download delfin source code if not exists git: repo: "{{ delfin_remote_url }}" dest: "{{ delfin_work_dir }}" version: "{{ delfin_repo_branch }}" when: - delfin_installation_type != "release" - delfinexisted.stat.exists is undefined or delfinexisted.stat.exists == false - name: ensure delfin directory exists file: path: "{{ delfin_tarball_dir }}" state: directory when: - delfin_installation_type == "release" - delfinexisted.stat.exists is undefined or delfinexisted.stat.exists == false - name: download and extract the delfin release tarball if not exists unarchive: src: "{{ delfin_download_url }}" dest: "{{ delfin_tarball_dir }}" remote_src: yes extra_opts: [--strip-components=1] when: - delfin_installation_type == "release" - delfinexisted.stat.exists is undefined or delfinexisted.stat.exists == false - name: copy delfin tarball into delfin work directory copy: src: "{{ delfin_tarball_dir }}/" dest: "{{ delfin_work_dir }}" mode: 0755 become: yes when: - delfin_installation_type == "release" - delfinexisted.stat.exists is undefined or delfinexisted.stat.exists == false - name: Update redis ip & port configuration ini_file: create: no path: "{{ delfin_work_dir }}/etc/delfin/delfin.conf" section: coordination option: backend_server value: "{{ delfin_redis_ip }}:{{ delfin_redis_port }}" become: yes when: delfin_installation_type != "container" # Telemetry option for perf collection interval - name: Update Performance collection interval ini_file: create: no path: "{{ delfin_work_dir }}/etc/delfin/delfin.conf" section: TELEMETRY option: performance_collection_interval value: "{{ performance_collection_interval }}" become: yes # Telemetry option for enabling dynamic processes - name: Enable dynamic sub-processes for performance collection ini_file: create: no path: "{{ delfin_work_dir }}/etc/delfin/delfin.conf" section: TELEMETRY option: enable_dynamic_subprocess value: "{{ enable_dynamic_subprocess }}" become: yes # Performance Export Configurations - name: Check and remove performance exporters configs ini_file: create: no state: absent path: "{{ delfin_work_dir 
}}/etc/delfin/delfin.conf" section: DEFAULT option: performance_exporters value: "" become: yes when: - performance_exporters == None - name: Enable Performance Exporter configuration ini_file: create: no path: "{{ delfin_work_dir }}/etc/delfin/delfin.conf" section: DEFAULT option: "{{ item.option }}" value: "{{ item.value }}" with_items: - { option: performance_exporters, value: "{{ performance_exporters }}" } become: yes when: - performance_exporters != None # Performance exporter - Kafka configuration - name: Update Kafka Exporter configuration ini_file: create: no path: "{{ delfin_work_dir }}/etc/delfin/delfin.conf" section: KAFKA_EXPORTER option: "{{ item.option }}" value: "{{ item.value }}" with_items: - { option: kafka_ip, value: "{{ delfin_exporter_kafka_ip }}" } - { option: kafka_port, value: "{{ delfin_exporter_kafka_port }}" } - { option: kafka_topic_name, value: "{{ delfin_exporter_kafka_topic }}" } become: yes when: - performance_exporters != None - "'PerformanceExporterKafka' in performance_exporters" # Performance exporter - Prometheus configuration - name: Update Prometheus Exporter configuration ini_file: create: no path: "{{ delfin_work_dir }}/etc/delfin/delfin.conf" section: PROMETHEUS_EXPORTER option: "{{ item.option }}" value: "{{ item.value }}" with_items: - { option: metric_server_ip, value: "{{ delfin_exporter_prometheus_ip }}" } - { option: metric_server_port, value: "{{ delfin_exporter_prometheus_port }}" } - { option: metrics_dir, value: "{{ delfin_exporter_prometheus_metrics_dir }}" } become: yes when: - performance_exporters != None - "'PerformanceExporterPrometheus' in performance_exporters" # Alert Exporter Configurations - name: Check and remove alert exporters configs ini_file: create: no state: absent path: "{{ delfin_work_dir }}/etc/delfin/delfin.conf" section: DEFAULT option: alert_exporters value: "" become: yes when: - alert_exporters == None - name: Enable AlertManager Exporter configuration ini_file: create: no path: "{{ delfin_work_dir }}/etc/delfin/delfin.conf" section: DEFAULT option: "{{ item.option }}" value: "{{ item.value }}" with_items: - { option: alert_exporters, value: "{{ alert_exporters }}" } become: yes when: - alert_exporters != None - name: Update AlertManager Exporter configuration ini_file: create: no path: "{{ delfin_work_dir }}/etc/delfin/delfin.conf" section: PROMETHEUS_ALERT_MANAGER_EXPORTER option: "{{ item.option }}" value: "{{ item.value }}" with_items: - { option: alert_manager_host, value: "{{ delfin_exporter_alertmanager_host }}" } - { option: alert_manager_port, value: "{{ delfin_exporter_alertmanager_port }}" } become: yes when: - alert_exporters != None - "'AlertExporterPrometheus' in alert_exporters" - name: Create delfin config dir in host file: path: "{{ delfin_config_dir }}" state: directory mode: 0755 become: yes - name: copy delfin configs to host copy: src: "{{ delfin_work_dir }}/etc/delfin/{{ item }}" dest: "{{ delfin_config_dir }}/{{ item }}" mode: 0755 become: yes with_items: - delfin.conf - api-paste.ini ================================================ FILE: installer/ansible/roles/delfin-installer/scenarios/start-delfin.yml ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. --- # Start delfin - name: Install sqlite3 package apt: name: sqlite3 state: present become: yes - name: Install python virtual environment pip: name: virtualenv state: latest executable: pip3 become: yes - name: Install python requirements pip: requirements: '{{ delfin_work_dir }}/requirements.txt' virtualenv: '{{ delfin_work_dir }}/venv' become: yes - name: Install python virtual environment activate script template: src: ./script/virtualenv3_exec.j2 dest: '{{ venv }}/exec' mode: 755 become: yes - name: Copy delfin db register script copy: src: ./script/create_db.py dest: '{{ delfin_work_dir }}/delfin/cmd/create_db.py' become: yes - name: Get previously running delfin processes shell: "ps -ef | grep -v grep | grep {{ delfin_work_dir }}/delfin/cmd/ | awk '{print $2}'" register: running_processes - name: Kill running delfin processes shell: "kill {{ item }}" with_items: "{{ running_processes.stdout_lines }}" - wait_for: path: "/proc/{{ item }}/status" state: absent with_items: "{{ running_processes.stdout_lines }}" ignore_errors: yes register: killed_processes - name: Force kill stuck processes shell: "kill -9 {{ item }}" with_items: "{{ killed_processes.results | select('failed') | map(attribute='item') | list }}" - name: Execute delfin manager shell: "{{ item }}" become: yes with_items: - '{{ venv }}/exec {{ delfin_work_dir }}/setup.py install' - '{{ venv }}/exec {{ delfin_work_dir }}/delfin/cmd/create_db.py --config-file {{ delfin_config_dir }}/delfin.conf >/tmp/create_db.log 2>&1 &' - '{{ venv }}/exec {{ delfin_work_dir }}/delfin/cmd/api.py --config-file {{ delfin_config_dir }}/delfin.conf >/tmp/api.log 2>&1 &' - '{{ venv }}/exec {{ delfin_work_dir }}/delfin/cmd/task.py --config-file {{ delfin_config_dir }}/delfin.conf >/tmp/task.log 2>&1 &' - '{{ venv }}/exec {{ delfin_work_dir }}/delfin/cmd/alert.py --config-file {{ delfin_config_dir }}/delfin.conf >/tmp/alert.log 2>&1 &' - '{{ venv }}/exec {{ delfin_work_dir }}/delfin/exporter/prometheus/exporter_server.py --config-file {{ delfin_config_dir }}/delfin.conf >/tmp/exporter.log 2>&1 &' args: chdir: "{{ delfin_work_dir }}" ================================================ FILE: installer/ansible/roles/delfin-installer/tasks/main.yml ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
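# Role entry point: Redis and RabbitMQ are set up and the delfin processes
# are started only for non-container installs; the source-code scenario
# always runs, and the "container" installation type is handed off to the
# docker compose based scenario instead.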
- name: Started installation of delfin debug: msg: "Installing delfin {{ delfin_installation_type }} on {{ host_ip }}" - name: Check and Install Redis include_tasks: scenarios/redis.yml when: delfin_installation_type != "container" - name: Check and Install RabbitMQ include_tasks: scenarios/rabbitmq.yml when: delfin_installation_type != "container" - name: Get delfin source code include_tasks: scenarios/source-code.yml - name: Start delfin processes include_tasks: scenarios/start-delfin.yml when: delfin_installation_type != "container" - name: containerized delfin deployment include_tasks: scenarios/container.yml when: delfin_installation_type == "container" ================================================ FILE: installer/ansible/script/create_db.py ================================================ #!/usr/bin/env python # Copyright 2022 The SODA Authors. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """db create script for delfin """ import os import sys from oslo_config import cfg from delfin import db from delfin import version from oslo_db import options as db_options CONF = cfg.CONF db_options.set_defaults(cfg.CONF, connection='sqlite:////var/lib/delfin/delfin.sqlite') def remove_prefix(text, prefix): if text.startswith(prefix): return text[len(prefix):] return text def main(): CONF(sys.argv[1:], project='delfin', version=version.version_string()) connection = CONF.database.connection head_tail = os.path.split(connection) path = remove_prefix(head_tail[0], 'sqlite:///') if not os.path.exists(path): os.makedirs(path) db.register_db() if __name__ == '__main__': main() ================================================ FILE: installer/ansible/script/virtualenv3_exec.j2 ================================================ #!/usr/bin/env bash # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. source {{ venv }}/bin/activate python3 $@ ================================================ FILE: installer/ansible/site.yml ================================================ # Copyright 2022 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. --- # Defines deployment design and assigns role to node groups - hosts: - delfin-nodes gather_facts: true any_errors_fatal: true become: True - name: Install delfin hosts: delfin-nodes remote_user: root vars_files: - group_vars/delfin.yml gather_facts: false become: True tasks: - import_role: name: delfin-installer tags: delfin ================================================ FILE: installer/helper.py ================================================ #!/usr/bin/python3 # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import logging from logging.handlers import RotatingFileHandler from datetime import datetime log_filename = 'delfin_installer.log' + \ datetime.now().strftime("%d_%m_%Y_%H_%M_%s") LOGGING_FORMAT = "[%(asctime)s] [%(levelname)s] [%(filename)s] " \ "[%(funcName)s():%(lineno)s] [PID:%(process)d" \ "TID:%(thread)d] %(message)s" LOGGING_LEVEL = "INFO" logger = None logfile = '' delfin_log_dir = '/var/log/soda/' def init_logging(): global logfile global logger try: os.mkdir(delfin_log_dir) except OSError: pass logfile = delfin_log_dir + log_filename server_log_file = RotatingFileHandler(logfile, maxBytes=10000, backupCount=5) logger = logging.getLogger() logger.setLevel(logging.INFO) formatter = logging.Formatter(LOGGING_FORMAT) server_log_file.setFormatter(formatter) logger.addHandler(server_log_file) def create_dir(dirname=None): try: os.mkdir(dirname) except OSError as ose: logger.warning("Directory [%s] already exists: [%s]" % (dirname, ose)) pass except Exception as e: logger.error("Error in creating Directory [%s] [%s]" % (dirname, e)) return def create_file(filename): if not os.path.isfile(filename): os.mknod(filename, 0o777) def copy_files(src=None, dest=None): logger.info("Copying [%s] to [%s]" % (src, dest)) shutil.copy(src, dest) init_logging() ================================================ FILE: installer/install ================================================ #!/bin/bash # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
PYTHON='python3' BASEDIR=$(dirname "$0") delfin_install_usage(){ echo "Usage $(basename $0) [--help|--precheck|--skip_precheck]" cat << DELFIN_INSTALL_INFO Usage: $(basename $0) [-h|--help] $(basename $0) [-p|--precheck] $(basename $0) [-s|--skip_precheck] Flags: -h, --help Print the usage of install -p, --precheck Only perform system software requirements for installation -s, --skip_precheck If precheck is not required and directly install DELFIN_INSTALL_INFO } precheck(){ delfin::log "Precheck called.." echo $! source ${BASEDIR}/precheck } precheck_and_install(){ delfin::log "precheck_and_install" precheck install } install(){ delfin::log "Install called.." ${PYTHON} ${BASEDIR}/install_delfin.py } main(){ first_arg=${1} second_arg=${2} source ${BASEDIR}/util.sh echo "${PROJECT_NAME} installation started... " case "${first_arg} ${second_arg}" in "1 -p"|"1 --precheck") precheck ;; "0 ") precheck_and_install ;; "1 -s"|"1 --skip_precheck") install ;; *) delfin_install_usage exit 1 ;; esac } # Entry point. START args="$# $*" main ${args} ================================================ FILE: installer/install.conf ================================================ # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Add all the required configs here python_version=3.x pip_version=3.x ================================================ FILE: installer/install_delfin.py ================================================ #!/usr/bin/python3 # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import subprocess import traceback as tb from subprocess import CalledProcessError from installer.helper import copy_files, create_dir, \ logger, logfile, delfin_log_dir, create_file delfin_source_path = '' delfin_etc_dir = '/etc/delfin' delfin_var_dir = '/var/lib/delfin' conf_file = os.path.join(delfin_etc_dir, 'delfin.conf') proj_name = 'delfin' DEVNULL = '/dev/null' def _activate(): path_to_activate = os.path.join(delfin_source_path, 'installer', proj_name, 'bin/activate') command = '. 
' + path_to_activate os.system(command) # Initialize the settings first def init(): pass def create_delfin_db(): try: db_path = os.path.join(delfin_source_path, 'script', 'create_db.py') subprocess.check_call(['python3', db_path, '--config-file', conf_file]) except CalledProcessError as cpe: logger.error("Got CPE error [%s]:[%s]" % (cpe, tb.print_exc())) return logger.info('db created ') def start_processes(): processes = ['api', 'task', 'alert'] # Start cmd processes for process in processes: env_var = 'DELFIN_' + process.upper() + '_INSTANCES' try: instances = os.environ.get(env_var) # Ignore multiple instance of api if not instances or process == 'api': instances = '1' start_process(process, int(instances)) except CalledProcessError as cpe: logger.error("Got CPE error [%s]:[%s]" % (cpe, tb.print_exc())) return except ValueError as e: logger.error( "Got invalid [%s] environment variable:[%s]" % (env_var, e)) return # Start exporter server process proc_path = os.path.join(delfin_source_path, 'delfin', 'exporter', 'prometheus', 'exporter_server.py') command = 'python3 ' + proc_path + ' --config-file ' + \ conf_file + ' >' + DEVNULL + ' 2>&1 &' logger.info("Executing command [%s]", command) os.system(command) logger.info("Exporter process_started") def start_process(process, instances=1): for instance in range(0, instances): proc_path = os.path.join(delfin_source_path, 'delfin', 'cmd', process + '.py') command = 'python3 ' + proc_path + ' --config-file ' + \ conf_file + ' >' + DEVNULL + ' 2>&1 &' logger.info("Executing command [%s]", command) os.system(command) logger.info("[%s] process_started", process) def install_delfin(): python_setup_comm = ['build', 'install'] req_logs = os.path.join(delfin_log_dir, 'requirements.log') command = 'pip3 install -r requirements.txt >' + req_logs + ' 2>&1' logger.info("Executing [%s]", command) os.system(command) setup_file = os.path.join(delfin_source_path, 'setup.py') for command in python_setup_comm: try: command = 'python3 ' + setup_file + ' ' + \ command + ' >>' + logfile logger.info("Executing [%s]", command) os.system(command) except CalledProcessError as cpe: logger.error("Got CPE error [%s]:[%s]" % (cpe, tb.print_exc())) return def main(): global delfin_source_path cwd = os.getcwd() logger.info("Current dir is %s" % cwd) this_file_dir = os.path.dirname(os.path.realpath(__file__)) delfin_source_path = os.path.join(this_file_dir, "../") logger.info("delfins [%s]" % delfin_source_path) os.chdir(delfin_source_path) logger.info(os.getcwd()) # create required directories create_dir(delfin_etc_dir) create_dir(delfin_var_dir) # Create blank prometheus exporter file filename = delfin_var_dir + '/' + 'delfin_exporter.txt' create_file(filename) # Copy required files # Copy api-paste.ini ini_file_src = os.path.join(delfin_source_path, 'etc', 'delfin', 'api-paste.ini') ini_file_dest = os.path.join(delfin_etc_dir, 'api-paste.ini') copy_files(ini_file_src, ini_file_dest) # Copy the conf file conf_file_src = os.path.join(delfin_source_path, 'etc', 'delfin', 'delfin.conf') copy_files(conf_file_src, conf_file) # install install_delfin() # create db create_delfin_db() # start start_processes() if __name__ == "__main__": main() ================================================ FILE: installer/install_dependencies.sh ================================================ #!/bin/bash # Install dependencies echo Installing dependencies sudo apt-get install -y make curl wget libltdl7 libseccomp2 libffi-dev gawk apt-transport-https ca-certificates curl gnupg gnupg-agent 
lsb-release software-properties-common sshpass pv echo Enabling docker repository sudo mkdir -p /etc/apt/keyrings curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg --yes echo \ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null # Update local repositories echo Updating local repositories sudo apt-get update # Install python dependencies echo Installing Python dependencies sudo apt-get install -y python3-distutils python3-testresources python3-pip python3 -m pip install -U pip # Update setuptool version if it is higher than 65 ver=$(python3 -m pip show setuptools | awk '/^Version: / {sub("^Version: ", ""); print}' | cut -d. -f1) if [ "$ver" -gt 65 ]; then echo Downgrade setuptools version to 65 python3 -m pip install setuptools==65.0.0 fi # Install ansible if not present if [ "`which ansible`" != "" ]; then echo ansible already installed, skipping. else echo Installing ansible python3 -m pip install --user ansible fi # Install docker if not present if [ "`which docker`" != "" ]; then echo Docker already installed, skipping. else echo Installing docker sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin fi # Ensure /usr/local/bin is in path export PATH=$PATH:/usr/local/bin ================================================ FILE: installer/precheck ================================================ #!/bin/bash # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Get the required packages from the conf BASEDIR=$(dirname "$0") python_version=$(awk -F "=" '/python_version/ {print $2}' ${BASEDIR}/install.conf) pip_version=$(awk -F "=" '/pip_version/ {print $2}' ${BASEDIR}/install.conf) apt_update_command="sudo apt-get update" apt_update_done=0 BIN_DIR=/usr/bin/ PROJECT_NAME='delfin' PROJECT_DIR=$(dirname "$0") log=delfin::log check_python(){ py_ver=$(python3 -V) if [ $? == 0 ]; then # result will be something like, "Python 3.x.x" # we need to get the 3.x.x out of it IFS=' ' read -ra tokens <<< "$py_ver" py_ver=${tokens[1]} installed_python=${py_ver%%\.*} req_python=${python_version%\.*} if [[ $installed_python -eq $req_python ]]; then delfin::log "Required python is [${req_python}] and installed is [${installed_python}]" test -n "$(which python${req_python})" && python_path=$(which python${req_python}) if [[ -z $python_path ]]; then delfin::log "Can not find required python version installed, please install it." exit 2 fi fi #ln -sf $python_path /usr/bin/python fi } check_pip(){ if [[ -x "$(which pip)" ]]; then test -n "$(which pip3)" && pip_path=$(which pip3) if [[ -z $pip_path ]]; then delfin::log "Can not find pip, please install it." exit 2 fi ln -sf $pip_path /usr/local/bin/pip fi } check_install_sqlite(){ sqlite_ver=$(sqlite3 -version) if [ $? 
-eq 0 ]; then delfin::log "Sqlite3 version[${sqlite_ver}] is already installed" return else # Install sqlite if [ ${apt_update_done} -eq 0 ]; then ${apt_update_command} apt_update_done=1 fi sudo apt-get -y install sqlite3 fi } check_install_rabbitmq(){ # check if MQ is installed mq_status=$(systemctl is-active --quiet rabbitmq-server.service) if [ $? -eq 0 ]; then delfin::log "RabbitMQ is already installed" return else #TODO check erlang # Import rabbitMQ ret=$(wget -O- https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc | sudo apt-key add -) if [ $? -eq 0 ]; then ret=$(wget -O- https://www.rabbitmq.com/rabbitmq-release-signing-key.asc | sudo apt-key add -) if [ $? -ne 0 ]; then delfin::log "Error in importing RabbitMQ" exit 1 fi else delfin::log "Error in importing rabbitMQ" exit 1 fi if [ ${apt_update_done} -eq 0 ]; then ${apt_update_command} apt_update_done=1 fi sudo apt-get -y install rabbitmq-server fi mq_status=$(systemctl is-active --quiet rabbitmq-server.service) if [ $? -eq 0 ]; then delfin::log "RabbitMQ is successfully installed" fi } check_install_redis(){ # Check if redis is installed or not redis_status=$(systemctl is-active --quiet redis.service) if [ $? -eq 0 ]; then delfin::log "Redis is already installed" return else if [ ${apt_update_done} -eq 0 ]; then ${apt_update_command} apt_update_done=1 fi sudo apt -y install redis-server fi } check_sys_req(){ ubuntu_release=$(${BIN_DIR}lsb_release -cs) if [[ ${ubuntu_release} == 'xenial' || ${ubuntu_release} == 'bionic' ]]; then delfin::log "System requirements satisfied" else delfin::log "Installation of ${PROJECT_NAME} is not supported on this platform" fi } check_install_p3_venv(){ venv_res=$(python3 -m pip install --user virtualenv) if [ $? -eq 0 ]; then delfin::log "venv is installed" fi py_venv=$(apt-get -y install python3-venv) if [ $? -eq 0 ]; then delfin::log "python3-venv is installed" fi if [[ $(lsb_release -rs) == "18.04" ]]; then virtualenv -p python3 ${BASEDIR}/${PROJECT_NAME} else venv_dir=$(python3 -m venv ${BASEDIR}/${PROJECT_NAME}) fi source ${BASEDIR}/${PROJECT_NAME}/bin/activate } main(){ source ${BASEDIR}/util.sh check_sys_req check_python check_pip check_install_sqlite check_install_rabbitmq check_install_redis check_install_p3_venv } # Entry function main ================================================ FILE: installer/uninstall ================================================ #!/bin/bash # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
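# Finds any running delfin api/alert/task/exporter processes started from
# this source tree, force-kills them, and finally removes the /etc/delfin
# and /var/lib/delfin directories.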
curr_dir=$(dirname "$0") API_PROC_PATH=${curr_dir}/../delfin/cmd/api.py ALERT_PROC_PATH=${curr_dir}/../delfin/cmd/alert.py TASK_PROC_PATH=${curr_dir}/../delfin/cmd/task.py EXPORTER_SERVER_PATH=${curr_dir}/../delfin/ mapfile -t api_proc_id < <( ps -eaf |grep ${API_PROC_PATH} | grep -v grep |awk '{print $2}' ) mapfile -t alert_proc_id < <( ps -eaf |grep ${ALERT_PROC_PATH} | grep -v grep |awk '{print $2}' ) mapfile -t task_proc_id < <( ps -eaf |grep ${TASK_PROC_PATH} | grep -v grep |awk '{print $2}' ) mapfile -t exporter_server_id < <( ps -eaf |grep ${EXPORTER_SERVER_PATH} | grep -v grep |awk '{print $2}' ) for i in "${api_proc_id[@]}" do if [ ! $i == "" ]; then echo "Killing delfin process ${i}" $(kill -9 $i) fi done for i in "${task_proc_id[@]}" do if [ ! $i == "" ]; then echo "Killing delfin process ${i}" $(kill -9 $i) fi done for i in "${alert_proc_id[@]}" do if [ ! $i == "" ]; then echo "Killing delfin process ${i}" $(kill -9 $i) fi done for i in "${exporter_server_id[@]}" do if [ ! $i == "" ]; then echo "Killing delfin process ${i}" $(kill -9 $i) fi done $(rm -rf /etc/delfin) $(rm -rf /var/lib/delfin) ================================================ FILE: installer/util.sh ================================================ #!/bin/bash # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Script to hold the utilities required PROJECT_NAME='delfin' LOG_DIR=/var/log/soda LOGFILE=${LOGFILE:-/var/log/soda/delfin_pre_installer.log} if [ ! -d ${LOG_DIR} ]; then mkdir -p $LOG_DIR fi # Log function delfin::log(){ DATE=`date "+%Y-%m-%d %H:%M:%S"` USER=$(whoami) echo "${DATE} ${USER} execute $0 [INFO] $@ 2>&1" >> $LOGFILE } ================================================ FILE: openapi-spec/swagger.yaml ================================================ openapi: 3.0.0 info: version: "v1" title: SODA Infrastructure Management API description: SODA Infrastructure Management API for resource monitoring, alerting and management across multiple, heterogeneous storage backend. Currently supporting storage monitoring and alerting. contact: name: SODA Support url: 'https://sodafoundation.io/slack' email: support@sodafoundation.io license: name: Apache 2.0 url: 'http://www.apache.org/licenses/LICENSE-2.0.html' tags: - name: Storages paths: /v1/storages: get: tags: - Storages description: List all registered storage back ends operationId: GetStorageBackends parameters: - name: limit in: query description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: Used in conjunction with limit to return a slice of items. offset is where to start in the list. 
required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: vendor in: query description: vendor(manufacturer) of the storage. required: false style: form explode: true schema: type: string - name: model in: query description: model of the storage required: false style: form explode: true schema: type: string - name: name in: query description: The storage name. required: false style: form explode: true schema: type: string - name: serial_number in: query description: The storage serial number. required: false style: form explode: true schema: type: string - name: status in: query description: The storage status required: false style: form explode: true schema: type: string enum: - normal - offline - abnormal responses: '200': description: Storage backend list available. content: application/json: schema: type: object required: - storages additionalProperties: true properties: storages: type: array title: The storages schema items: $ref: '#/components/schemas/StorageBackendResponse' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' post: tags: - Storages description: Register a storage device for management. operationId: addStorageBackends requestBody: description: Inventory item to add content: application/json: schema: $ref: '#/components/schemas/StorageBackendRegistry' responses: '200': description: 'Accepted, items added to the infrastructure management' content: application/json: schema: $ref: '#/components/schemas/StorageBackendResponse' '400': description: BadRequest content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '409': description: An item already exists content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{storage_id}': get: tags: - Storages description: Get details of a storage device operationId: GetStorageBackendbyID parameters: - name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string responses: '200': description: Storage backend details available content: application/json: schema: $ref: '#/components/schemas/StorageBackendResponse' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' delete: tags: - Storages description: Unregister an already registered storage backend parameters: - name: storage_id in: path description: Database ID created for a storage backend.
required: true style: simple explode: false schema: type: string responses: '202': description: Accepted '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' /v1/storages/sync: post: tags: - Storages description: Collect all resources from all registered backends and synchronize with DB. operationId: syncStorageBackends responses: '202': description: Accepted '400': description: BadRequest content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '409': description: An item already exists content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{storage_id}/sync': post: tags: - Storages description: Collect all resources from the specified storage backend and synchronize with DB operationId: syncStorage parameters: - name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string responses: '202': description: Accepted '400': description: BadRequest content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '409': description: An item already exists content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{storage_id}/access-info': get: tags: - Storages description: Get access info of a registered storage backend operationId: GettorageAccessInfobyID parameters: - name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string responses: '200': description: Storage Access-info available content: application/json: schema: $ref: '#/components/schemas/StorageAccessInfoResponse' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' put: tags: - Storages description: Update a registered storage system's access information in the Infrastructure management DB. operationId: updateStorageAccessInfobyID parameters: - name: storage_id in: path description: Database ID created for a storage backend.
required: true style: simple explode: false schema: type: string requestBody: content: application/json: schema: $ref: '#/components/schemas/StorageBackendRegistryUpdate' responses: '200': description: Storage backend available with updated access information content: application/json: schema: $ref: '#/components/schemas/StorageAccessInfoResponse' '400': description: BadRequest content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/access-infos': get: tags: - Storages description: Get access info of all registered storages operationId: GetAllStorageAccessInfos responses: '200': description: Storage Access-info available content: application/json: schema: $ref: '#/components/schemas/StorageAccessInfosResponse' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' /v1/storage-pools: get: tags: - Storage Pools description: List all storage pools. parameters: - name: limit in: query description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The pool name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a storage pool. required: false style: form explode: true schema: type: string - name: native_storage_pool_id in: query description: Actual ID of the storage pool in the storage backend. required: false style: form explode: true schema: type: string - name: storage_id in: query description: Database ID created for a storage backend.
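# Illustrative usage (comment only, not part of the spec): rotating stored REST credentials via the access-info endpoint above. <storage_id>, the endpoint and the new password are placeholders; username and password are the fields the update schema requires.
#   curl -X PUT 'http://localhost:8190/v1/storages/<storage_id>/access-info' \
#     -H 'Content-Type: application/json' \
#     -d '{"rest": {"username": "admin", "password": "new-secret"}}'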
required: false style: form explode: true schema: type: string - name: status in: query description: The pool status required: false style: form explode: true schema: type: string enum: - normal - offline - abnormal responses: '200': description: List storage pools query was successful content: application/json: schema: type: object required: - storage_pools additionalProperties: true properties: storage_pools: type: array title: the storage pools schema items: $ref: '#/components/schemas/StoragePoolSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storage-pools/{id}': get: tags: - Storage Pools description: Get storage pool detail by pool ID. parameters: - name: id in: path description: Database ID created for a storage pool. required: true style: simple explode: false schema: type: string responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/StoragePoolSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' /v1/controllers: get: tags: - Controllers description: List all controllers. parameters: - name: limit in: query description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The controller name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a controller. required: false style: form explode: true schema: type: string - name: native_controller_id in: query description: Actual ID of the controller in the storage backend. required: false style: form explode: true schema: type: string - name: storage_id in: query description: Database ID created for a storage backend.
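# Illustrative usage (comment only, not part of the spec): limit/offset paging over storage pools as described above; the page size and endpoint are assumptions.
#   curl 'http://localhost:8190/v1/storage-pools?limit=50&offset=0&sort=name:asc&status=normal'
#   curl 'http://localhost:8190/v1/storage-pools?limit=50&offset=50&sort=name:asc&status=normal'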
required: false style: form explode: true schema: type: string - name: status in: query description: The controller status required: false style: form explode: true schema: type: string enum: - normal - offline - unknown responses: '200': description: List controllers query was successful content: application/json: schema: type: object required: - controllers additionalProperties: true properties: controllers: type: array title: the controllers schema items: $ref: '#/components/schemas/ControllerSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/controllers/{id}': get: tags: - Controllers description: Get controller detail by controller ID. parameters: - name: id in: path description: Database ID created for a controller. required: true style: simple explode: false schema: type: string responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/ControllerSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' /v1/ports: get: tags: - Ports description: List all ports. parameters: - name: limit in: query description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The port name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a port. required: false style: form explode: true schema: type: string - name: native_port_id in: query description: Actual ID of the port in the storage backend. required: false style: form explode: true schema: type: string - name: storage_id in: query description: Database ID created for a storage backend.
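# Illustrative usage (comment only, not part of the spec): filtering controllers by owning backend and status; the storage_id value and endpoint are placeholders.
#   curl 'http://localhost:8190/v1/controllers?storage_id=<storage_id>&status=normal'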
required: false style: form explode: true schema: type: string - name: connection_status in: query description: The port connection_status required: false style: form explode: true schema: type: string enum: - connected - disconnected - unknown - name: health_status in: query description: The port health_status required: false style: form explode: true schema: type: string enum: - normal - abnormal - unknown responses: '200': description: List port query was successful content: application/json: schema: type: object required: - ports additionalProperties: true properties: ports: type: array title: the port schema items: $ref: '#/components/schemas/PortSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/ports/{id}': get: tags: - Ports description: Get port detail by port ID. parameters: - name: id in: path description: Database ID created for a port. required: true style: simple explode: false schema: type: string responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/PortSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/disks': get: tags: - Disks description: List all disks. parameters: - name: limit in: query description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The disk name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a disk. required: false style: form explode: true schema: type: string - name: native_disk_id in: query description: Actual ID of the disk in the storage backend. required: false style: form explode: true schema: type: string - name: storage_id in: query description: Database ID created for a storage backend. required: false style: form explode: true schema: type: string - name: native_disk_group_id in: query description: Actual ID of the disk group in the storage backend.
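# Illustrative usage (comment only, not part of the spec): listing only healthy, connected ports using the filters above; the endpoint is an assumption.
#   curl 'http://localhost:8190/v1/ports?connection_status=connected&health_status=normal'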
required: false style: form explode: true schema: type: string - name: status in: query description: The disk status required: false style: form explode: true schema: type: string enum: - normal - offline - abnormal responses: '200': description: List disk query was successful content: application/json: schema: type: object required: - disks additionalProperties: true properties: disks: type: array title: the disk schema items: $ref: '#/components/schemas/DiskSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/disks/{id}': get: tags: - Disks description: Get disk detail by disk ID. parameters: - name: id in: path description: Database ID created for a disk. required: true style: simple explode: false schema: type: string responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/DiskSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' /v1/volumes: get: tags: - Volumes description: List all storage volumes. parameters: - name: limit in: query description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The volume name required: false style: form explode: true schema: type: string - name: native_storage_pool_id in: query description: Actual ID of the storage pool in the storage backend. required: false style: form explode: true schema: type: string - name: native_volume_id in: query description: Actual ID created for the volume in the storage backend. required: false style: form explode: true schema: type: string - name: storage_id in: query description: Database ID created for a storage backend.
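# Illustrative usage (comment only, not part of the spec): listing disks of one disk group on a backend; both ID values and the endpoint are placeholders.
#   curl 'http://localhost:8190/v1/disks?storage_id=<storage_id>&native_disk_group_id=<group_id>'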
required: false style: form explode: true schema: type: string - name: status in: query description: The volume status required: false style: form explode: true schema: type: string enum: - normal - offline - abnormal responses: '200': description: List volumes operation was successful content: application/json: schema: type: object required: - volumes additionalProperties: true properties: volumes: type: array items: $ref: '#/components/schemas/VolumeRespSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/volumes/{id}': get: tags: - Volumes description: Get storage volume detail by volume ID. parameters: - name: id in: path description: Database ID created for a volume. required: true style: simple explode: false schema: type: string responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/VolumeRespSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' /v1/filesystems: get: tags: - Filesystems description: List all filesystems. parameters: - name: limit in: query description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The filesystem name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a filesystem. required: false style: form explode: true schema: type: string - name: native_filesystem_id in: query description: Actual ID of the filesystem in the storage backend. required: false style: form explode: true schema: type: string - name: native_pool_id in: query description: Pool ID of the filesystem in the storage backend. required: false style: form explode: true schema: type: string - name: storage_id in: query description: Database ID created for a storage backend.
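# Illustrative usage (comment only, not part of the spec): listing volumes of a backend filtered by pool. The pool ID SRP_1 matches the StoragePoolSpec example below; <storage_id> and the endpoint are placeholders.
#   curl 'http://localhost:8190/v1/volumes?storage_id=<storage_id>&native_storage_pool_id=SRP_1'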
required: false style: form explode: true schema: type: string - name: security_mode in: query description: The filesystem security modes required: false style: form explode: true schema: type: string enum: - mixed - native - ntfs - unix - name: status in: query description: The filesystem status required: false style: form explode: true schema: type: string enum: - normal - faulty responses: '200': description: List filesystem query was successful content: application/json: schema: type: object required: - filesystems additionalProperties: true properties: filesystems: type: array title: the filesystem schema items: $ref: '#/components/schemas/FilesystemSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/filesystems/{id}': get: tags: - Filesystems description: Get filesystem detail by filesystem ID. parameters: - name: id in: path description: Database ID created for a filesystem. required: true style: simple explode: false schema: type: string responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/FilesystemSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' /v1/qtrees: get: tags: - Qtrees description: List all qtrees. parameters: - name: limit in: query description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The qtree name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a qtree. required: false style: form explode: true schema: type: string - name: native_qtree_id in: query description: Actual ID of the qtree in the storage backend. required: false style: form explode: true schema: type: string - name: native_filesystem_id in: query description: Filesystem ID of the qtree in the storage backend. required: false style: form explode: true schema: type: string - name: storage_id in: query description: Database ID created for a storage backend.
required: false style: form explode: true schema: type: string - name: security_mode in: query description: The qtree security modes required: false style: form explode: true schema: type: string enum: - mixed - native - ntfs - unix responses: '200': description: List qtree query was successful content: application/json: schema: type: object required: - qtrees additionalProperties: true properties: qtrees: type: array title: the qtree schema items: $ref: '#/components/schemas/QtreeSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/qtrees/{id}': get: tags: - Qtrees description: Get qtree detail by qtree ID. parameters: - name: id in: path description: Database ID created for a qtree. required: true style: simple explode: false schema: type: string responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/QtreeSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' /v1/quotas: get: tags: - Quotas description: List all quotas. parameters: - name: limit in: query description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The quota name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a quota. required: false style: form explode: true schema: type: string - name: native_quota_id in: query description: Actual ID of the quota in the storage backend. required: false style: form explode: true schema: type: string - name: native_filesystem_id in: query description: Filesystem ID of the quota in the storage backend. required: false style: form explode: true schema: type: string - name: native_qtree_id in: query description: Qtree ID of the quota in the storage backend. required: false style: form explode: true schema: type: string - name: storage_id in: query description: Database ID created for a storage backend.
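# Illustrative usage (comment only, not part of the spec): listing the qtrees that belong to one filesystem. Filesystem_A matches the QtreeSpec example below; the endpoint is an assumption.
#   curl 'http://localhost:8190/v1/qtrees?native_filesystem_id=Filesystem_A'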
required: false style: form explode: true schema: type: string - name: type in: query description: The quota types required: false style: form explode: true schema: type: string enum: - filesystem - tree - user - group responses: '200': description: List quota query was successful content: application/json: schema: type: object required: - quotas additionalProperties: true properties: quotas: type: array title: the quota schema items: $ref: '#/components/schemas/QuotaSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/quotas/{id}': get: tags: - Quotas description: Get quota detail by quota ID. parameters: - name: id in: path description: Database ID created for a quota. required: true style: simple explode: false schema: type: string responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/QuotaSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' /v1/shares: get: tags: - Shares description: List all shares. parameters: - name: limit in: query description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The share name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a share. required: false style: form explode: true schema: type: string - name: native_share_id in: query description: Actual ID of the share in the storage backend. required: false style: form explode: true schema: type: string - name: native_filesystem_id in: query description: Filesystem ID of the share in the storage backend. required: false style: form explode: true schema: type: string - name: native_qtree_id in: query description: Qtree ID of the share in the storage backend. required: false style: form explode: true schema: type: string - name: storage_id in: query description: Database ID created for a storage backend.
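# Illustrative usage (comment only, not part of the spec): listing user quotas of a filesystem using the type filter above; ID values and the endpoint are placeholders.
#   curl 'http://localhost:8190/v1/quotas?type=user&native_filesystem_id=Filesystem_A'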
required: false style: form explode: true schema: type: string - name: protocol in: query description: The share protocol required: false style: form explode: true schema: type: string enum: - cifs - nfs - ftp - hdfs responses: '200': description: List share query was successful content: application/json: schema: type: object required: - shares additionalProperties: true properties: shares: type: array title: the share schema items: $ref: '#/components/schemas/ShareSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/shares/{id}': get: tags: - Shares description: Get share detail by share ID. parameters: - name: id in: path description: Database ID created for a share. required: true style: simple explode: false schema: type: string responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/ShareSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{storage_id}/snmp-config': get: tags: - SnmpConfig description: >- Get details of the SNMP alert source information configured on behalf of backend devices parameters: - name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/SnmpConfigRespSpec' '400': description: BadRequest content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' put: tags: - SnmpConfig description: >- Modify the SNMP alert source information configured on behalf of backend devices operationId: putSnmpConfigInfo parameters: - name: storage_id in: path description: Database ID created for a storage backend.
required: true style: simple explode: false schema: type: string requestBody: $ref: '#/components/requestBodies/SnmpConfigUpdateSpec' responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/SnmpConfigRespSpec' '400': description: BadRequest content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' delete: tags: - SnmpConfig description: >- Remove the SNMP alert source information configured on behalf of backend devices parameters: - name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string responses: '200': description: OK content: {} '400': description: BadRequest content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{storage_id}/alerts/{sequence_number}': delete: tags: - Alerts description: Clear the alert for the input alert sequence number parameters: - name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string - name: sequence_number in: path description: Sequence number which uniquely maps to the trap sent by a backend. required: true style: simple explode: false schema: type: string responses: '200': description: OK content: {} '400': description: BadRequest content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '404': description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{storage_id}/alerts/sync': post: tags: - Alerts description: Sync alerts from storage device operationId: syncStorageAlerts parameters: - name: storage_id in: path description: Database ID created for a storage backend.
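# Illustrative usage (comment only, not part of the spec): clearing one alert by its sequence number; both path values and the endpoint are placeholders. The alert sync request body follows the StorageBackendAlertSync request body, which is defined outside this excerpt, so no payload is sketched here.
#   curl -X DELETE 'http://localhost:8190/v1/storages/<storage_id>/alerts/<sequence_number>'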
required: true style: simple explode: false schema: type: string requestBody: $ref: '#/components/requestBodies/StorageBackendAlertSync' responses: '200': description: Accepted '400': description: BadRequest content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '409': description: An item already exists content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{id}/capabilities': get: tags: - Performance Monitoring description: | Provides the capabilities supported by a storage backend configured in the delfin deployment. parameters: - name: id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string responses: '200': description: Returns specifications of storage content: application/json: schema: $ref: '#/components/schemas/StorageCapabilitiesResponse' '404': description: The storage does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: Invalid capabilities. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '505': description: Capability feature not supported. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/snmp-configs': get: tags: - Storages description: >- Get details of all SNMP alert source information configured on behalf of backend devices responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/SnmpConfigsRespSpec' '400': description: BadRequest content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{storage_id}/storage-host-initiators': get: tags: - Masking views description: List all storage host initiators. parameters: - name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string - name: limit in: query description: >- Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: >- Used in conjunction with limit to return a slice of items. offset is where to start in the list.
required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The storage host initiator name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a storage host initiator. required: false style: form explode: true schema: type: string - name: description in: query description: The storage host initiator description required: false style: form explode: true schema: type: string - name: alias in: query description: The storage host initiator alias name required: false style: form explode: true schema: type: string - name: wwn in: query description: The storage host initiator world wide name required: false style: form explode: true schema: type: string - name: native_storage_host_initiator_id in: query description: >- Actual ID of the storage host initiator in the storage backend. required: false style: form explode: true schema: type: string - name: native_storage_host_id in: query description: >- Actual ID of the associated storage host in the storage backend if any. required: false style: form explode: true schema: type: string - name: status in: query description: The storage host initiator status required: false style: form explode: true schema: type: string enum: - normal - offline - abnormal - unknown responses: '200': description: List storage host initiators query was successful content: application/json: schema: type: object required: - storage_host_initiators additionalProperties: true properties: storage_host_initiators: type: array title: the storage host initiators schema items: $ref: '#/components/schemas/StorageHostInitiatorRespSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{storage_id}/storage-hosts': get: tags: - Masking views description: List all storage hosts. parameters: - name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string - name: limit in: query description: >- Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: >- Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The storage host name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a storage host.
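# Illustrative usage (comment only, not part of the spec): listing the initiators of a backend that are in the normal state; <storage_id> and the endpoint are placeholders.
#   curl 'http://localhost:8190/v1/storages/<storage_id>/storage-host-initiators?status=normal'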
required: false style: form explode: true schema: type: string - name: description in: query description: The storage host description required: false style: form explode: true schema: type: string - name: native_storage_host_id in: query description: Actual ID of the storage host in the storage backend. required: false style: form explode: true schema: type: string - name: status in: query description: The storage host status required: false style: form explode: true schema: type: string enum: - normal - offline - abnormal - unknown - name: ip_address in: query description: IP address of the storage host. required: false style: form explode: true schema: type: string - name: os_type in: query description: Operating system of the storage host required: false style: form explode: true schema: type: string enum: - windows - linux responses: '200': description: List storage hosts query was successful content: application/json: schema: type: object required: - storage_hosts additionalProperties: true properties: storage_hosts: type: array title: the storage hosts schema items: $ref: '#/components/schemas/StorageHostRespSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{storage_id}/storage-host-groups': get: tags: - Masking views description: List all storage host groups. parameters: - name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string - name: limit in: query description: >- Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: >- Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The storage host group name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a storage host group. required: false style: form explode: true schema: type: string - name: description in: query description: The storage host group description required: false style: form explode: true schema: type: string - name: native_storage_host_group_id in: query description: Actual ID of the storage host group in the storage backend.
required: false style: form explode: true schema: type: string responses: '200': description: List storage host groups query was successful content: application/json: schema: type: object required: - storage_host_groups additionalProperties: true properties: storage_host_groups: type: array title: the storage host group schema items: $ref: '#/components/schemas/StorageHostGroupRespSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{storage_id}/port-groups': get: tags: - Masking views description: List all port groups. parameters: - name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string - name: limit in: query description: >- Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: >- Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The port group name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a port group. required: false style: form explode: true schema: type: string - name: description in: query description: The port group description required: false style: form explode: true schema: type: string - name: native_port_group_id in: query description: Actual ID of the port group in the storage backend. required: false style: form explode: true schema: type: string responses: '200': description: List port groups query was successful content: application/json: schema: type: object required: - port_groups additionalProperties: true properties: port_groups: type: array title: the port groups schema items: $ref: '#/components/schemas/PortGroupRespSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{storage_id}/volume-groups': get: tags: - Masking views description: List all volume groups. parameters: - name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string - name: limit in: query description: >- Requests a page size of items. Returns a number of items up to a limit value.
Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: >- Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The volume group name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a volume group. required: false style: form explode: true schema: type: string - name: description in: query description: The volume group description required: false style: form explode: true schema: type: string - name: native_volume_group_id in: query description: Actual ID of the volume group in the storage backend. required: false style: form explode: true schema: type: string responses: '200': description: List volume groups query was successful content: application/json: schema: type: object required: - volume_groups additionalProperties: true properties: volume_groups: type: array title: the volume groups schema items: $ref: '#/components/schemas/VolumeGroupRespSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '/v1/storages/{storage_id}/masking-views': get: tags: - Masking views description: List all masking views. parameters: - name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string - name: limit in: query description: >- Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the marker parameter value in a subsequent limited request. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 - name: offset in: query description: >- Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32 - name: sort in: query description: >- Comma-separated list of sort keys and optional sort directions in the form of key:val required: false style: form explode: true schema: type: string example: 'sort=name:desc,id:asc' - name: name in: query description: The masking view name required: false style: form explode: true schema: type: string - name: id in: query description: Database ID created for a masking view. required: false style: form explode: true schema: type: string - name: description in: query description: The masking view description required: false style: form explode: true schema: type: string - name: native_masking_view_id in: query description: Actual ID of the masking view in the storage backend.
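# Illustrative usage (comment only, not part of the spec): resolving the masking views that reference a host group, using the native-ID filters listed below; ID values and the endpoint are placeholders.
#   curl 'http://localhost:8190/v1/storages/<storage_id>/masking-views?native_storage_host_group_id=<group_id>'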
required: false style: form explode: true schema: type: string - name: native_storage_host_group_id in: query description: >- Actual ID of the storage host group in the storage backend. required: false style: form explode: true schema: type: string - name: native_volume_group_id in: query description: Actual ID of the volume group in the storage backend. required: false style: form explode: true schema: type: string - name: native_port_group_id in: query description: Actual ID of the port group in the storage backend. required: false style: form explode: true schema: type: string - name: native_storage_host_id in: query description: Actual ID of the storage host in the storage backend. required: false style: form explode: true schema: type: string - name: native_volume_id in: query description: Actual ID of the volume in the storage backend. required: false style: form explode: true schema: type: string - name: native_port_id in: query description: Actual ID of the port in the storage backend. required: false style: form explode: true schema: type: string responses: '200': description: List masking views query was successful content: application/json: schema: type: object required: - masking_views additionalProperties: true properties: masking_views: type: array title: the masking views schema items: $ref: '#/components/schemas/MaskingViewRespSpec' '401': description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '403': description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' '500': description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' components: schemas: BaseModel: type: object properties: id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 created_at: type: string format: date-time readOnly: true example: '2017-07-10T14:36:58.014Z' updated_at: type: string format: date-time readOnly: true example: '2017-07-10T14:36:58.014Z' RestAccessInfoRegistry: required: - host - port - username - password type: object properties: host: type: string example: 10.0.0.1 port: type: string example: "8008" username: type: string example: admin password: type: string SSHAccessInfoRegistry: required: - host - port - username - password - pub_key - pub_key_type type: object properties: host: type: string example: '10.0.0.1' port: type: string example: '22' username: type: string example: admin password: type: string pub_key: type: string example: '73:d8:34:18:70:2a:ae:d8:1c:a5:44:40:ef:50:d0:63' pub_key_type: type: string enum: ['ed25519', 'ecdsa', 'rsa'] CLIAccessInfoRegistry: required: - host - username - password type: object properties: host: type: string example: 10.0.0.1 port: type: string example: "8888" username: type: string example: admin password: type: string SMISAccessInfoRegistry: required: - host - username - password type: object properties: host: type: string example: 10.0.0.1 port: type: string example: "5989" username: type: string example: admin password: type: string namespace: type: string RestAccessInfoUpdate: required: - username - password type: object properties: host: type: string example: 10.0.0.1 port: type: string example: "8008" username: type: string example: admin password: type: string SSHAccessInfoUpdate: required: - username - password type: object properties: host: type: string example: '10.0.0.1' port: type: string example: '22' username: type: string example: admin password: type: string pub_key: type:
string example: '73:d8:34:18:70:2a:ae:d8:1c:a5:44:40:ef:50:d0:63' pub_key_type: type: string enum: ['ed25519', 'ecdsa', 'rsa'] CLIAccessInfoUpdate: required: - username - password type: object properties: host: type: string example: 10.0.0.1 port: type: string example: "8888" username: type: string example: admin password: type: string SMISAccessInfoUpdate: required: - username - password type: object properties: host: type: string example: 10.0.0.1 port: type: string example: "5989" username: type: string example: admin password: type: string namespace: type: string RestAccessInfoResponse: required: - host - port - username type: object properties: host: type: string example: 10.0.0.1 port: type: string example: "8008" username: type: string example: admin password: type: string SSHAccessInfoResponse: required: - host - port - username type: object properties: host: type: string example: '10.0.0.1' port: type: string example: '22' username: type: string example: admin password: type: string pub_key: type: string example: '73:d8:34:18:70:2a:ae:d8:1c:a5:44:40:ef:50:d0:63' pub_key_type: type: string enum: ['ed25519', 'ecdsa', 'rsa'] CLIAccessInfoResponse: required: - host - port - username type: object properties: host: type: string example: 10.0.0.1 port: type: string example: "8008" username: type: string example: admin password: type: string SMISAccessInfoResponse: required: - host - port - username - namespace type: object properties: host: type: string example: 10.0.0.1 port: type: string example: "8008" username: type: string example: admin password: type: string namespace: type: string StorageBackendRegistry: required: - model - vendor anyOf: - $ref: '#/components/schemas/RestAccessInfoRegistry' - $ref: '#/components/schemas/SSHAccessInfoRegistry' - $ref: '#/components/schemas/CLIAccessInfoRegistry' - $ref: '#/components/schemas/SMISAccessInfoRegistry' type: object properties: name: type: string example: EMC-VMAX-123456 description: type: string example: VMAX storage lab1 vendor: type: string example: dellemc model: type: string example: vmax rest: $ref: '#/components/schemas/RestAccessInfoRegistry' ssh: $ref: '#/components/schemas/SSHAccessInfoRegistry' cli: $ref: '#/components/schemas/CLIAccessInfoRegistry' smis: $ref: '#/components/schemas/SMISAccessInfoRegistry' extra_attributes: type: object additionalProperties: type: string example: array_id: 00002554321 StorageBackendRegistryUpdate: anyOf: - $ref: '#/components/schemas/RestAccessInfoUpdate' - $ref: '#/components/schemas/SSHAccessInfoUpdate' - $ref: '#/components/schemas/CLIAccessInfoUpdate' - $ref: '#/components/schemas/SMISAccessInfoUpdate' type: object properties: rest: $ref: '#/components/schemas/RestAccessInfoUpdate' ssh: $ref: '#/components/schemas/SSHAccessInfoUpdate' cli: $ref: '#/components/schemas/CLIAccessInfoUpdate' smis: $ref: '#/components/schemas/SMISAccessInfoUpdate' extra_attributes: type: object additionalProperties: type: string example: controller1: string ip1: string sshKeyPath: string StorageBackendResponse: type: object properties: id: type: string name: type: string example: EMC-VMAX-123456 description: type: string example: VMAX storage lab1 vendor: type: string example: Dell EMC model: type: string example: VMAX250F status: type: string example: normal firmware: type: string example: 5978.278 serial_number: type: string example: '0002004355' location: type: string created_at: type: string updated_at: type: string sync_status: type: string enum: - SYNCED - SYNCING total_capacity: type: integer format:
StorageAccessInfoResponse: type: object properties: id: type: string rest: $ref: '#/components/schemas/RestAccessInfoResponse' ssh: $ref: '#/components/schemas/SSHAccessInfoResponse' cli: $ref: '#/components/schemas/CLIAccessInfoResponse' smis: $ref: '#/components/schemas/SMISAccessInfoResponse' vendor: type: string example: dellemc model: type: string example: vmax extra_attributes: type: object additionalProperties: type: string example: array_id: string StorageAccessInfosResponse: description: Response for all access infos configuration. type: object properties: access_infos: type: array description: the list of access info items: $ref: '#/components/schemas/StorageAccessInfoResponse' StoragePoolSpec: description: >- A storage pool is discovered and updated by task manager. Each pool can be regarded as a physical storage pool or a virtual storage pool. It is a logical and atomic pool and can be abstracted from any storage platform. allOf: - $ref: '#/components/schemas/BaseModel' - required: - id - name type: object properties: name: type: string id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_storage_pool_id: type: string readOnly: true example: SRP_1 storage_type: type: string enum: - block - file - unified description: type: string status: type: string enum: - normal - offline - abnormal total_capacity: type: integer format: int64 used_capacity: type: integer format: int64 free_capacity: type: integer format: int64 ControllerSpec: description: >- A controller is discovered and updated by task manager. allOf: - $ref: '#/components/schemas/BaseModel' - required: - id - name type: object properties: name: type: string id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_controller_id: type: string readOnly: true example: Controller_A soft_version: type: string location: type: string status: type: string enum: - normal - offline - unknown cpu_info: type: string memory_size: type: integer format: int64 PortSpec: description: >- A port is discovered and updated by task manager. allOf: - $ref: '#/components/schemas/BaseModel' - required: - id - name type: object properties: name: type: string id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_port_id: type: string readOnly: true example: Port_A native_parent_id: type: string readOnly: true example: Controller_A speed: type: integer max_speed: type: integer location: type: string connection_status: type: string enum: - connected - disconnected - unknown health_status: type: string enum: - normal - abnormal - unknown type: type: string enum: - fc - iscsi - ficon - fcoe - eth - sas - ib - other logical_type: type: string enum: - frontend - backend - service - management - internal - maintenance - interconnect - other wwn: type: string mac_address: type: string ipv4: type: string ipv4_mask: type: string ipv6: type: string ipv6_mask: type: string DiskSpec: description: >- A disk is discovered and updated by task manager.
allOf: - $ref: '#/components/schemas/BaseModel' - required: - id - name type: object properties: name: type: string id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_disk_id: type: string readOnly: true example: Disk_A serial_number: type: string readOnly: true example: SN00112233 manufacturer: type: string model: type: string firmware: type: string speed: type: integer capacity: type: integer location: type: string status: type: string enum: - normal - offline - abnormal physical_type: type: string enum: - sata - sas - ssd - nl-sas - unknown logical_type: type: string enum: - free - member - hotspare - cache health_score: type: integer native_disk_group_id: type: string FilesystemSpec: description: >- A filesystem is discovered and updated by task manager. allOf: - $ref: '#/components/schemas/BaseModel' - required: - id - name type: object properties: name: type: string id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_filesystem_id: type: string readOnly: true example: Filesystem_A native_pool_id: type: string readOnly: true example: Pool_A total_capacity: type: integer used_capacity: type: integer free_capacity: type: integer status: type: string enum: - normal - faulty worm: type: string enum: - non_worm - audit_log - compliance - enterprise type: type: string enum: - thick - thin deduplicated: type: boolean compressed: type: boolean security_mode: type: string enum: - mixed - native - ntfs - unix QtreeSpec: description: >- A qtree is discovered and updated by task manager. allOf: - $ref: '#/components/schemas/BaseModel' - required: - id - name type: object properties: name: type: string id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_qtree_id: type: string readOnly: true example: Qtree_A native_filesystem_id: type: string readOnly: true example: Filesystem_A path: type: string security_mode: type: string enum: - mixed - native - ntfs - unix QuotaSpec: description: >- A quota is discovered and updated by task manager. allOf: - $ref: '#/components/schemas/BaseModel' - required: - id - name - type type: object properties: id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_quota_id: type: string readOnly: true example: Quota_A name: type: string type: type: string enum: - filesystem - tree - user - group storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_filesystem_id: type: string readOnly: true example: Filesystem_A native_qtree_id: type: string readOnly: true example: Qtree_A capacity_hard_limit: type: integer format: int64 example: 100 capacity_soft_limit: type: integer format: int64 example: 80 file_hard_limit: type: integer format: int64 example: 10 file_soft_limit: type: integer format: int64 example: 8 used_capacity: type: integer format: int64 example: 100 file_count: type: integer format: int64 example: 10 user_group_name: type: string ShareSpec: description: >- A share is discovered and updated by task manager. 
allOf: - $ref: '#/components/schemas/BaseModel' - required: - id - name type: object properties: name: type: string id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_share_id: type: string readOnly: true example: Share_A native_filesystem_id: type: string readOnly: true example: Filesystem_A native_qtree_id: type: string protocol: type: string enum: - cifs - nfs - ftp - hdfs path: type: string VolumeRespSpec: description: Volume is a device created by the storage service; it can be attached to a physical machine or virtual machine instance. allOf: - $ref: '#/components/schemas/BaseModel' - type: object properties: name: type: string description: type: string status: type: string enum: - available - error storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 total_capacity: type: integer format: int64 example: 2 used_capacity: type: integer format: int64 example: 2 free_capacity: type: integer format: int64 example: 2 compressed: type: boolean example: false deduplicated: type: boolean example: false type: type: string enum: - thick - thin native_volume_id: type: string wwn: type: string native_storage_pool_id: type: string SnmpConfigUpdateSpec: required: - host - version type: object properties: version: type: string description: SNMP version. Must be set by the user example: SNMPV2C enum: - SNMPV2C - SNMPV3 community_string: type: string description: Community string. This should be filled if version is V2C username: type: string description: SNMP V3 usm username. This should be filled if version is V3 engine_id: type: string description: >- Engine ID of the device which will be sending the traps. This should be filled if version is V3 security_level: type: string description: Security level for the user. This should be filled if version is V3 example: noAuthnoPriv enum: - noAuthnoPriv - authNoPriv - authPriv auth_protocol: type: string description: >- Authentication protocol to be selected. This should be filled if authNoPriv or authPriv is set as security_level example: MD5 enum: - MD5 - SHA auth_key: type: string description: >- Authentication key. This should be filled if authNoPriv or authPriv is set privacy_protocol: type: string description: >- Privacy or encryption protocol to be selected. This should be filled if authPriv is set as security_level example: DES enum: - 3DES - DES - AES privacy_key: type: string description: >- Privacy or encryption password. This should be filled if authPriv is set as security_level host: type: string example: 10.0.0.1 context_name: type: string description: Context name of the alert source example: "New Context" retry_num: type: integer description: >- Maximum number of retries while connecting to alert source. By default, set to 1 example: 2 expiration: type: integer description: >- Expiration time (in sec) for one alert source connect request. By default, set to 2 example: 60 port: type: integer description: >- Port for connecting to alert source. By default, set to 161 example: 20162 description: SNMP alert source configuration attributes.
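For reference, a sketch of pushing this configuration for a registered backend. Only version and host are required, with community_string needed for V2C; the PUT route is an assumption based on delfin/api/v1/alert_source.py, so verify it against the router before relying on it.

import requests

storage_id = "084bf71e-a102-11e7-88a8-e31fe6d52248"  # a registered backend
snmp_config = {
    "version": "SNMPV2C",          # required
    "host": "10.0.0.1",            # required: address of the trap source
    "community_string": "public",  # needed because version is V2C
    "port": 161,
}
# Route is an assumption -- verify against delfin/api/v1/router.py.
resp = requests.put(
    "http://localhost:8190/v1/storages/%s/alert-source" % storage_id,
    json=snmp_config,
)
print(resp.status_code)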
SnmpConfigRespSpec: description: Response for snmp alert source configuration. allOf: - $ref: '#/components/schemas/BaseModel' - type: object properties: version: type: string description: SNMP version. Must be set by the user example: SNMPV2C enum: - SNMPV2C - SNMPV3 community_string: type: string description: Community string. This should be filled if version is V2C username: type: string description: SNMP V3 usm username. This should be filled if version is V3 engine_id: type: string description: Engine ID of the device which will be sending the traps security_level: type: string description: Security level for the user example: noAuthnoPriv enum: - noAuthnoPriv - authNoPriv - authPriv auth_protocol: type: string description: >- Authentication protocol to be selected. This should be filled if authNoPriv or authPriv is set as security_level example: MD5 enum: - MD5 - SHA auth_key: type: string description: >- Authentication key. This should be filled if authNoPriv or authPriv is set privacy_protocol: type: string description: >- Privacy or encryption protocol to be selected. This should be filled if authPriv is set as security_level example: DES enum: - 3DES - DES - AES privacy_key: type: string description: >- Privacy or encryption password. This should be filled if authPriv is set as security_level host: type: string description: All alert source IPs of the device example: 10.0.0.1,127.0.0.1 context_name: type: string description: Context name of the alert source example: "New Context" retry_num: type: integer description: >- Maximum number of retries while connecting to alert source. By default, set to 1 example: 2 expiration: type: integer description: >- Expiration time (in sec) for one alert source connect request. By default, set to 2 example: 60 port: type: integer description: >- Port for connecting to alert source. By default, set to 161 example: 20162 SnmpConfigsRespSpec: description: Response for all snmp alert source configuration. type: object properties: snmp_configs: type: array description: the list of snmp configs items: $ref: '#/components/schemas/SnmpConfigRespSpec' StorageHostInitiatorRespSpec: description: >- Storage host initiator allows a host to gain access to the storage array. It may or may not be attached to a storage host. allOf: - $ref: '#/components/schemas/BaseModel' - type: object properties: name: type: string description: type: string readOnly: true example: "storage host initiator" alias: type: string readOnly: true example: "storage host initiator" wwn: type: string readOnly: true example: "storage host initiator1" storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_storage_host_initiator_id: type: string readOnly: true description: >- Actual ID of the storage host initiator in the storage backend. example: storage_host_initiator_0 native_storage_host_id: type: string readOnly: true description: Actual ID of the storage host in the storage backend. example: storage_host_0 status: type: string readOnly: true enum: - normal - offline - abnormal - unknown StorageHostRespSpec: description: >- Storage host is a consumer of volume from storage. allOf: - $ref: '#/components/schemas/BaseModel' - type: object properties: name: type: string description: type: string readOnly: true storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_storage_host_id: type: string readOnly: true description: Actual ID of the storage host in the storage backend. example: storage_host_0 status: type: string readOnly: true enum: - normal - offline - abnormal - unknown ip_address: type: string readOnly: true description: IP address of the storage host. example: "192.168.1.4" os_type: type: string readOnly: true description: Operating system of the storage host.
enum: - windows - linux storage_host_initiators: type: array items: type: string readOnly: true description: List of storage host initiator native ids. StorageHostGroupRespSpec: description: >- Storage host group is a collection of storage hosts from storage. allOf: - $ref: '#/components/schemas/BaseModel' - type: object properties: name: type: string description: type: string readOnly: true storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_storage_host_group_id: type: string readOnly: true description: Actual ID of the storage host group in the storage backend. example: storage_host_group0 storage_hosts: type: array items: type: string readOnly: true description: List of storage host native ids. PortGroupRespSpec: description: >- Port group is a collection of ports from storage. allOf: - $ref: '#/components/schemas/BaseModel' - type: object properties: name: type: string description: type: string readOnly: true storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_port_group_id: type: string readOnly: true description: Actual ID of the port group in the storage backend. example: port_group_0 ports: type: array items: type: string readOnly: true description: List of port native ids. VolumeGroupRespSpec: description: >- Volume group is a collection of volumes from storage. allOf: - $ref: '#/components/schemas/BaseModel' - type: object properties: name: type: string description: type: string readOnly: true storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_volume_group_id: type: string readOnly: true description: Actual ID of the volume group in the storage backend. example: volume_group_0 volumes: type: array items: type: string readOnly: true description: List of volume native ids. MaskingViewRespSpec: description: >- Masking view is an object which shows the access path between host and LUN. allOf: - $ref: '#/components/schemas/BaseModel' - type: object properties: name: type: string description: type: string readOnly: true storage_id: type: string readOnly: true example: 084bf71e-a102-11e7-88a8-e31fe6d52248 native_masking_view_id: type: string readOnly: true description: Actual ID of the masking view in the storage backend. example: masking_view_0 native_storage_host_group_id: type: string readOnly: true description: >- Actual ID of the storage host group in the storage backend. example: storage_host_group_0 native_volume_group_id: type: string readOnly: true description: Actual ID of the volume group in the storage backend. example: volume_group_0 native_port_group_id: type: string readOnly: true description: Actual ID of the port group in the storage backend. example: port_group_0 native_storage_host_id: type: string readOnly: true description: Actual ID of the storage host in the storage backend. example: storage_host_0 native_volume_id: type: string readOnly: true description: Actual ID of the volume in the storage backend. example: volume_0 native_port_id: type: string readOnly: true description: Actual ID of the port in the storage backend. example: port_0 StorageBackendAlertSync: type: object properties: begin_time: type: integer format: int64 description: >- Start time (in milliseconds) for alert sync. It is optional. If not provided, alerts are fetched without filtering start time example: 13577777777777766 end_time: type: integer format: int64 description: >- End time (in milliseconds) for alert sync. It is optional. If not provided, alerts are fetched without filtering end time example: 13577777777777777
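Both timestamps are epoch milliseconds, and either may be omitted to leave that bound unfiltered. A small stdlib-only helper for building a sync window; the helper name is illustrative, not part of delfin:

import time

def last_hours_window(hours):
    # begin_time/end_time in epoch milliseconds, as StorageBackendAlertSync
    # expects; drop either key to skip that bound.
    end_ms = int(time.time() * 1000)
    return {"begin_time": end_ms - hours * 3600 * 1000, "end_time": end_ms}

print(last_hours_window(24))  # e.g. a "last 24 hours" sync request body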
ErrorSpec: required: - error_code - error_msg - error_args type: object properties: error_code: type: string error_msg: type: string error_args: type: array items: type: string description: >- Detailed HTTP error response, which consists of an HTTP status code and a custom error message unique for each failure case. StorageCapabilitiesResponse: type: object required: - metadata - spec properties: metadata: type: object properties: model: type: string description: Name of the supported storage (driver) example: VMAX250F vendor: type: string description: Name of the vendor example: Dell EMC spec: type: object required: - is_historic properties: is_historic: type: boolean example: true description: Set true during storage driver registration if the driver supports fetching historic metrics. This enables the internal performance framework to either call the driver interface to pull real-time metrics or historic time series metrics. resource_metrics: $ref: '#/components/schemas/ResourceMetrics' ResourceMetrics: type: object description: Map of resources and supported metrics of respective resources for storage (driver) additionalProperties: type: array items: type: object description: list of metrics with supported units and their descriptions properties: unit: type: string description: supported metric unit description: type: string description: storage specific description for respective metric example: storagePool: - throughput: unit: MB/s description: Represents how much data is successfully transferred in MB/s - readThroughput: unit: MB/s description: Represents how much data read is successfully transferred in MB/s - readRequests: unit: IOPS description: Read requests per second responses: HTTPStatus400: description: BadRequest content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' HTTPStatus401: description: NotAuthorized content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' HTTPStatus403: description: Forbidden content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' HTTPStatus404: description: The resource does not exist content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' HTTPStatus409: description: An item already exists content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' HTTPStatus500: description: An unexpected error occurred. content: application/json: schema: $ref: '#/components/schemas/ErrorSpec' parameters: storage_id: name: storage_id in: path description: Database ID created for a storage backend. required: true style: simple explode: false schema: type: string native_storage_pool_id: name: native_storage_pool_id in: path description: Actual ID of the storage pool in backend. required: true style: simple explode: false schema: type: string native_volume_id: name: native_volume_id in: path description: Actual ID of the volume in backend. required: true style: simple explode: false schema: type: string limit: name: limit in: query description: Requests a page size of items. Returns a number of items up to a limit value. Use the limit parameter to make an initial limited request and use the offset parameter to page through subsequent results. required: false style: form explode: true schema: minimum: 1 type: integer format: int32 offset: name: offset in: query description: Used in conjunction with limit to return a slice of items. offset is where to start in the list. required: false style: form explode: true schema: minimum: 0 type: integer format: int32
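limit and offset together give slice-style pagination: request fixed-size pages and stop on the first short page. A generic sketch under assumed endpoint and response key, shown here for a volumes listing:

import requests

def iter_pages(url, key, page_size=100):
    # Walk limit/offset slices until a short page marks the end of the list.
    offset = 0
    while True:
        resp = requests.get(url, params={"limit": page_size, "offset": offset})
        resp.raise_for_status()
        items = resp.json().get(key, [])
        for item in items:
            yield item
        if len(items) < page_size:
            break
        offset += page_size

for vol in iter_pages("http://localhost:8190/v1/volumes", "volumes"):
    print(vol.get("native_volume_id"))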
sequence_number: name: sequence_number in: path description: Sequence number which uniquely maps to the trap sent by a backend. required: true style: simple explode: false schema: type: string requestBodies: SnmpConfigUpdateSpec: content: application/json: schema: $ref: '#/components/schemas/SnmpConfigUpdateSpec' StorageBackendAlertSync: content: application/json: schema: $ref: '#/components/schemas/StorageBackendAlertSync' ================================================ FILE: requirements.txt ================================================ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. # pbr should be first pbr!=2.1.0,>=2.0.0 # Apache-2.0 alembic>=0.8.10 # MIT Babel>=2.9.1 # BSD eventlet>=0.31.0 # MIT greenlet>=0.4.10 # MIT jsonschema>=2.6.0 # MIT oslo.config>=5.2.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 oslo.db==11.3.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.messaging>=5.29.0 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.rootwrap>=5.8.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 oslo.concurrency>=3.26.0 # Apache-2.0 paramiko>=2.0.0 # LGPLv2.1+ Paste>=2.0.2 # MIT PasteDeploy>=1.5.0 # MIT retrying!=1.3.0,>=1.2.3 # Apache-2.0 Routes>=2.3.1 # MIT six>=1.10.0 # MIT SQLAlchemy==1.4.44 # MIT stevedore>=1.20.0 # Apache-2.0 tooz==2.8.0 # Apache-2.0 WebOb>=1.7.1 # MIT pysnmp>=4.4.11 # BSD redis>=3.3.8 # MIT cryptography<3.4; # Apache-2.0 pyopenssl==19.1.0 # Apache-2.0 APScheduler~=3.6.3 flask kafka-python importlib-metadata==3.7.0; python_version < "3.8" tenacity==6.3.1 tzlocal<3.0 scp>=0.13.0 defusedxml==0.6.0 xlrd>=2.0.1 ================================================ FILE: script/create_db.py ================================================ #!/usr/bin/env python # Copyright 2020 The SODA Authors. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """db create script for delfin """ import sys from oslo_config import cfg from delfin import db from delfin import version CONF = cfg.CONF def main(): CONF(sys.argv[1:], project='delfin', version=version.version_string()) db.register_db() if __name__ == '__main__': main() ================================================ FILE: script/start.sh ================================================ #!/usr/bin/env bash # Copyright 2020 The SODA Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e case "$1" in api) # Register database schema python3 script/create_db.py --config-file /etc/delfin/delfin.conf # Run API Server exec python3 delfin/cmd/api.py --config-file /etc/delfin/delfin.conf ;; task) exec python3 delfin/cmd/task.py --config-file /etc/delfin/delfin.conf ;; alert) exec python3 delfin/cmd/alert.py --config-file /etc/delfin/delfin.conf ;; exporter) exec python3 delfin/exporter/prometheus/exporter_server.py --config-file /etc/delfin/delfin.conf ;; *) echo "Usage: $0 {api|task|alert|exporter}" >&2 exit 1 ;; esac ================================================ FILE: setup.cfg ================================================ ================================================ FILE: setup.py ================================================ # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from setuptools import setup, find_packages setup( name="delfin", version="1.0.0", author="SODA Authors", author_email="Opensds-tech-discuss@lists.opensds.io", license="Apache 2.0", packages=find_packages(exclude=("tests", "tests.*")), python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", entry_points={ 'delfin.alert.exporters': [ 'example = delfin.exporter.example:AlertExporterExample', 'prometheus = delfin.exporter.prometheus.exporter' ':AlertExporterPrometheus', ], 'delfin.performance.exporters': [ 'example = delfin.exporter.example:PerformanceExporterExample', 'prometheus = delfin.exporter.prometheus.exporter' ':PerformanceExporterPrometheus', 'kafka = delfin.exporter.kafka.exporter:PerformanceExporterKafka' ], 'delfin.storage.drivers': [ 'fake_storage fake_driver = delfin.drivers.fake_storage:FakeStorageDriver', 'fujitsu eternus = delfin.drivers.fujitsu.eternus.eternus_stor:EternusDriver', 'dellemc unity = delfin.drivers.dell_emc.unity.unity:UnityStorDriver', 'dellemc vmax = delfin.drivers.dell_emc.vmax.vmax:VMAXStorageDriver', 'dellemc pmax = delfin.drivers.dell_emc.vmax.vmax:VMAXStorageDriver', 'dellemc scaleio = delfin.drivers.dell_emc.scaleio.scaleio_stor:ScaleioStorageDriver', 'dellemc vnx_block = delfin.drivers.dell_emc.vnx.vnx_block.vnx_block:VnxBlockStorDriver', 'dellemc vplex = delfin.drivers.dell_emc.vplex.vplex_stor:VplexStorageDriver', 'dellemc powerstore = delfin.drivers.dell_emc.power_store.power_store:PowerStoreDriver', 'hitachi vsp = delfin.drivers.hitachi.vsp.vsp_stor:HitachiVspDriver', 'hpe 3par = delfin.drivers.hpe.hpe_3par.hpe_3parstor:Hpe3parStorDriver', 'hpe primera = delfin.drivers.hpe.hpe_3par.hpe_3parstor:Hpe3parStorDriver', 'hpe msa = delfin.drivers.hpe.hpe_msa.hpe_msastor:HpeMsaStorDriver', 'huawei oceanstor = delfin.drivers.huawei.oceanstor.oceanstor:OceanStorDriver', 'ibm storwize_svc = delfin.drivers.ibm.storwize_svc.storwize_svc:StorwizeSVCDriver', 'ibm ds8k = delfin.drivers.ibm.ds8k.ds8k:DS8KDriver', 'netapp cmode = delfin.drivers.netapp.dataontap.cluster_mode:NetAppCmodeDriver', 'hitachi hnas = delfin.drivers.hitachi.hnas.hds_nas:HitachiHNasDriver', 'pure flasharray = delfin.drivers.pure.flasharray.pure_flasharray:PureFlashArrayDriver', 'h3c unistor_cf = delfin.drivers.h3c.unistor_cf.unistor_cf:H3cUniStorCfDriver', 'macrosan macrosan = delfin.drivers.macro_san.ms.ms_stor:MacroSanDriver', # AS5500/AS5300/AS2600/AS2200 use the same driver 'inspur as5500 = delfin.drivers.inspur.as5500.as5500:As5500Driver' ] }, ) ================================================ FILE: test-requirements.txt ================================================ coverage!=4.4,>=4.0 # Apache-2.0 ddt>=1.0.1 # MIT fixtures>=3.0.0 # Apache-2.0/BSD iso8601>=0.1.11 # MIT oslotest>=3.2.0 # Apache-2.0 testtools>=2.2.0 # MIT ================================================ FILE: tox.ini ================================================ [tox] skipsdist = True skip_missing_interpreters = True envlist = py3, pep8 [testenv] basepython = python3 usedevelop = True setenv = VIRTUAL_ENV={envdir} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = coverage erase coverage run -m unittest discover {posargs:delfin/tests/unit} coverage html -d htmlcov [testenv:pep8] deps = flake8 commands = flake8 {posargs:delfin} flake8 {posargs:installer} [flake8] ignore = E402,W503,W504
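The 'delfin.storage.drivers' entry points declared in setup.py above are keyed by "<vendor> <model>". They are typically resolved through stevedore (pinned in requirements.txt); a generic sketch of how such a lookup works, independent of delfin's own loader in delfin/drivers/:

from stevedore import driver

# Look up the driver class registered under the "<vendor> <model>" key.
mgr = driver.DriverManager(
    namespace="delfin.storage.drivers",
    name="fake_storage fake_driver",  # key copied verbatim from setup.py above
    invoke_on_load=False,             # return the class, do not instantiate
)
print(mgr.driver)  # -> delfin.drivers.fake_storage.FakeStorageDriver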