Repository: ARM-software/workload-automation Branch: master Commit: 251c263c564b Files: 534 Total size: 2.4 MB Directory structure: gitextract_n5js309u/ ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.md │ │ ├── feature_request.md │ │ ├── question---support-.md │ │ └── question.md │ └── workflows/ │ └── main.yml ├── .gitignore ├── .readthedocs.yml ├── LICENSE ├── MANIFEST.in ├── README.rst ├── dev_scripts/ │ ├── README │ ├── clean_install │ ├── clear_env │ ├── get_apk_versions │ ├── pep8 │ ├── pylint │ ├── pylint_plugins.py │ ├── rebuild_all_uiauto │ └── update_copyrights ├── doc/ │ ├── Makefile │ ├── build_instrument_method_map.py │ ├── build_plugin_docs.py │ ├── make.bat │ ├── requirements.txt │ └── source/ │ ├── api/ │ │ ├── output.rst │ │ └── workload.rst │ ├── api.rst │ ├── changes.rst │ ├── conf.py │ ├── developer_information/ │ │ ├── developer_guide/ │ │ │ └── writing_plugins.rst │ │ ├── developer_guide.rst │ │ ├── developer_reference/ │ │ │ ├── contributing.rst │ │ │ ├── framework_overview.rst │ │ │ ├── plugins.rst │ │ │ ├── revent.rst │ │ │ └── serialization.rst │ │ ├── developer_reference.rst │ │ ├── how_to.rst │ │ └── how_tos/ │ │ ├── adding_plugins.rst │ │ └── processing_output.rst │ ├── developer_information.rst │ ├── faq.rst │ ├── glossary.rst │ ├── index.rst │ ├── instrument_method_map.template │ ├── migration_guide.rst │ ├── plugins.rst │ ├── user_information/ │ │ ├── how_to.rst │ │ ├── how_tos/ │ │ │ ├── agenda.rst │ │ │ ├── device_setup.rst │ │ │ └── revent.rst │ │ ├── installation.rst │ │ ├── user_guide.rst │ │ ├── user_reference/ │ │ │ ├── agenda.rst │ │ │ ├── configuration.rst │ │ │ ├── invocation.rst │ │ │ ├── output_directory.rst │ │ │ └── runtime_parameters.rst │ │ └── user_reference.rst │ └── user_information.rst ├── extras/ │ ├── Dockerfile │ ├── README │ ├── pylintrc │ └── walog.vim ├── pytest.ini ├── requirements.txt ├── scripts/ │ ├── cpustates │ └── wa ├── setup.py ├── tests/ │ ├── __init__.py │ ├── ci/ │ │ └── idle_agenda.yaml │ ├── data/ │ │ ├── bad-syntax-agenda.yaml │ │ ├── extensions/ │ │ │ └── devices/ │ │ │ └── test_device.py │ │ ├── includes/ │ │ │ ├── agenda.yaml │ │ │ ├── configs/ │ │ │ │ └── test.yaml │ │ │ ├── section-include.yaml │ │ │ ├── sections/ │ │ │ │ ├── section1.yaml │ │ │ │ └── section2.yaml │ │ │ ├── user/ │ │ │ │ └── config.yaml │ │ │ └── workloads.yaml │ │ ├── interrupts/ │ │ │ ├── after │ │ │ ├── before │ │ │ └── result │ │ ├── logcat.2.log │ │ ├── logcat.log │ │ ├── test-agenda.yaml │ │ └── test-config.py │ ├── test_agenda_parser.py │ ├── test_config.py │ ├── test_diff.py │ ├── test_exec_control.py │ ├── test_execution.py │ ├── test_plugin.py │ ├── test_runtime_param_utils.py │ ├── test_signal.py │ └── test_utils.py └── wa/ ├── __init__.py ├── assets/ │ └── bin/ │ ├── arm64/ │ │ └── revent │ └── armeabi/ │ └── revent ├── commands/ │ ├── __init__.py │ ├── create.py │ ├── list.py │ ├── postgres_schemas/ │ │ ├── postgres_schema.sql │ │ ├── postgres_schema_update_v1.2.sql │ │ ├── postgres_schema_update_v1.3.sql │ │ ├── postgres_schema_update_v1.4.sql │ │ ├── postgres_schema_update_v1.5.sql │ │ └── postgres_schema_update_v1.6.sql │ ├── process.py │ ├── report.py │ ├── revent.py │ ├── run.py │ ├── schema_changelog.rst │ ├── show.py │ └── templates/ │ ├── apk_workload │ ├── apkrevent_workload │ ├── apkuiauto_workload │ ├── basic_workload │ ├── revent_workload │ ├── setup.template │ ├── uiauto/ │ │ ├── UiAutomation.java │ │ ├── uiauto_AndroidManifest.xml │ │ ├── uiauto_build.gradle │ │ ├── uiauto_build_script │ │ └── 
uiauto_workload_template/ │ │ ├── build.gradle │ │ ├── gradle/ │ │ │ └── wrapper/ │ │ │ ├── gradle-wrapper.jar │ │ │ └── gradle-wrapper.properties │ │ ├── gradlew │ │ ├── gradlew.bat │ │ └── settings.gradle │ └── uiauto_workload ├── framework/ │ ├── __init__.py │ ├── command.py │ ├── configuration/ │ │ ├── __init__.py │ │ ├── core.py │ │ ├── default.py │ │ ├── execution.py │ │ ├── parsers.py │ │ ├── plugin_cache.py │ │ └── tree.py │ ├── entrypoint.py │ ├── exception.py │ ├── execution.py │ ├── getters.py │ ├── host.py │ ├── instrument.py │ ├── job.py │ ├── output.py │ ├── output_processor.py │ ├── plugin.py │ ├── pluginloader.py │ ├── resource.py │ ├── run.py │ ├── signal.py │ ├── target/ │ │ ├── __init__.py │ │ ├── assistant.py │ │ ├── config.py │ │ ├── descriptor.py │ │ ├── info.py │ │ ├── manager.py │ │ ├── runtime_config.py │ │ └── runtime_parameter_manager.py │ ├── uiauto/ │ │ ├── app/ │ │ │ ├── build.gradle │ │ │ └── src/ │ │ │ └── main/ │ │ │ ├── AndroidManifest.xml │ │ │ └── java/ │ │ │ └── com/ │ │ │ └── arm/ │ │ │ └── wa/ │ │ │ └── uiauto/ │ │ │ ├── ActionLogger.java │ │ │ ├── ApplaunchInterface.java │ │ │ ├── BaseUiAutomation.java │ │ │ ├── UiAutoUtils.java │ │ │ └── UxPerfUiAutomation.java │ │ ├── build.gradle │ │ ├── build.sh │ │ ├── gradle/ │ │ │ └── wrapper/ │ │ │ ├── gradle-wrapper.jar │ │ │ └── gradle-wrapper.properties │ │ ├── gradlew │ │ ├── gradlew.bat │ │ ├── settings.gradle │ │ └── uiauto.aar │ ├── version.py │ └── workload.py ├── instruments/ │ ├── __init__.py │ ├── delay.py │ ├── dmesg.py │ ├── energy_measurement.py │ ├── fps.py │ ├── hwmon.py │ ├── misc.py │ ├── perf.py │ ├── perfetto.py │ ├── poller/ │ │ ├── Makefile │ │ ├── __init__.py │ │ ├── bin/ │ │ │ ├── arm64/ │ │ │ │ └── poller │ │ │ └── armeabi/ │ │ │ └── poller │ │ └── poller.c │ ├── proc_stat/ │ │ ├── __init__.py │ │ └── gather-load.sh │ ├── screencap.py │ ├── serialmon.py │ └── trace_cmd.py ├── output_processors/ │ ├── __init__.py │ ├── cpustates.py │ ├── csvproc.py │ ├── postgresql.py │ ├── sqlite.py │ ├── status.py │ ├── targz.py │ └── uxperf.py ├── tools/ │ └── revent/ │ ├── Makefile │ └── revent.c ├── utils/ │ ├── __init__.py │ ├── android.py │ ├── cpustates.py │ ├── diff.py │ ├── doc.py │ ├── exec_control.py │ ├── formatter.py │ ├── log.py │ ├── misc.py │ ├── postgres.py │ ├── revent.py │ ├── serializer.py │ ├── terminalsize.py │ ├── trace_cmd.py │ └── types.py └── workloads/ ├── __init__.py ├── adobereader/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.adobereader.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── adobereader/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── aitutu/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.aitutu.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── aitutu/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── androbench/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.androbench.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ 
│ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── androbench/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── angrybirds_rio/ │ └── __init__.py ├── antutu/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.antutu.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── antutu/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── apache.py ├── applaunch/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.applaunch.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── applaunch/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── benchmarkpi/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.benchmarkpi.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── benchmarkpi/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── chrome/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.chrome.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── deepbench/ │ └── __init__.py ├── dhrystone/ │ ├── __init__.py │ ├── bin/ │ │ ├── arm64/ │ │ │ └── dhrystone │ │ └── armeabi/ │ │ └── dhrystone │ └── src/ │ ├── Makefile │ └── dhrystone.c ├── drarm/ │ └── __init__.py ├── exoplayer/ │ └── __init__.py ├── geekbench/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.geekbench.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── geekbench/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── gfxbench/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.gfxbench.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── gfxbench/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── glbenchmark/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.glbenchmark.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── 
wa/ │ │ └── uiauto/ │ │ └── glbenchmark/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── gmail/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.gmail.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── gmail/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── googlemaps/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.googlemaps.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── googlephotos/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.googlephotos.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── googlephotos/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── googleplaybooks/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.googleplaybooks.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── googleplaybooks/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── googleslides/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.googleslides.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── googleslides/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── hackbench/ │ ├── __init__.py │ ├── bin/ │ │ ├── arm64/ │ │ │ └── hackbench │ │ └── armeabi/ │ │ └── hackbench │ └── src/ │ └── LICENSE ├── homescreen/ │ └── __init__.py ├── honorofkings/ │ └── __init__.py ├── hwuitest/ │ └── __init__.py ├── idle.py ├── jankbench/ │ └── __init__.py ├── lmbench/ │ ├── __init__.py │ └── bin/ │ ├── COPYING │ ├── COPYING-2 │ ├── README │ ├── arm64/ │ │ ├── bw_mem │ │ └── lat_mem_rd │ └── armeabi/ │ ├── bw_mem │ └── lat_mem_rd ├── manual/ │ └── __init__.py ├── meabo/ │ └── __init__.py ├── memcpy/ │ ├── __init__.py │ ├── bin/ │ │ ├── arm64/ │ │ │ └── memcpy │ │ └── armeabi/ │ │ └── memcpy │ └── src/ │ ├── build.sh │ └── memcopy.c ├── mongoperf/ │ └── __init__.py ├── motionmark/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.motionmark.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── motionmark/ │ │ └── 
UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── openssl/ │ └── __init__.py ├── pcmark/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.pcmark.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── pcmark/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── recentfling/ │ └── __init__.py ├── rt_app/ │ ├── LICENSE │ ├── __init__.py │ ├── bin/ │ │ ├── arm64/ │ │ │ ├── README.rt-app │ │ │ └── rt-app │ │ ├── armeabi/ │ │ │ ├── README.rt-app │ │ │ └── rt-app │ │ ├── ppc64le/ │ │ │ ├── README.rt-app │ │ │ └── rt-app │ │ ├── x86/ │ │ │ ├── README.rt-app │ │ │ └── rt-app │ │ └── x86_64/ │ │ ├── README.rt-app │ │ └── rt-app │ ├── use_cases/ │ │ ├── browser-long.json │ │ ├── browser-short.json │ │ ├── camera-long.json │ │ ├── camera-short.json │ │ ├── mp3-long.json │ │ ├── mp3-short.json │ │ ├── spreading-tasks.json │ │ ├── taskset.json │ │ ├── video-long.json │ │ └── video-short.json │ └── workgen ├── schbench/ │ ├── __init__.py │ ├── bin/ │ │ └── arm64/ │ │ └── schbench │ └── src/ │ └── LICENSE ├── shellscript/ │ └── __init__.py ├── speedometer/ │ ├── LICENSE │ ├── __init__.py │ ├── speedometer_archive-2.0.tar.lzma │ ├── speedometer_archive-2.1.tar.lzma │ └── speedometer_archive-3.0.tar.lzma ├── stress_ng/ │ ├── LICENSE │ ├── __init__.py │ └── bin/ │ ├── arm64/ │ │ └── stress-ng │ └── armeabi/ │ └── stress-ng ├── sysbench/ │ ├── LICENSE │ ├── __init__.py │ └── bin/ │ ├── arm64/ │ │ └── sysbench │ └── armeabi/ │ └── sysbench ├── templerun2/ │ └── __init__.py ├── the_chase/ │ └── __init__.py ├── uibench/ │ └── __init__.py ├── uibenchjanktests/ │ └── __init__.py ├── vellamo/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.vellamo.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── vellamo/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── youtube/ │ ├── __init__.py │ ├── com.arm.wa.uiauto.youtube.apk │ └── uiauto/ │ ├── app/ │ │ ├── build.gradle │ │ └── src/ │ │ └── main/ │ │ ├── AndroidManifest.xml │ │ └── java/ │ │ └── com/ │ │ └── arm/ │ │ └── wa/ │ │ └── uiauto/ │ │ └── youtube/ │ │ └── UiAutomation.java │ ├── build.gradle │ ├── build.sh │ ├── gradle/ │ │ └── wrapper/ │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle └── youtube_playback/ └── __init__.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug report about: Create a report to help resolve an issue. title: '' labels: bug assignees: '' --- **Describe the issue** A clear and concise description of what the bug is. **Run Log** Please attach your `run.log` detailing the issue. 
**Other comments (optional)** ================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: Feature request about: Suggest an idea for this project title: '' labels: enhancement assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. **Describe the solution you'd like** A clear and concise description of what you want to happen. **Additional context** Add any other context about the feature request here. ================================================ FILE: .github/ISSUE_TEMPLATE/question---support-.md ================================================ --- name: 'Question / Support ' about: Ask a question or request support title: '' labels: question assignees: '' --- ** ================================================ FILE: .github/ISSUE_TEMPLATE/question.md ================================================ --- name: Question about: Ask a question title: '' labels: question assignees: '' --- **Describe your query** What would you like to know / what are you trying to achieve? ================================================ FILE: .github/workflows/main.yml ================================================ name: WA Test Suite on: push: branches: [ master ] pull_request: branches: [ master ] types: [opened, synchronize, reopened, ready_for_review] schedule: - cron: 0 2 * * * # Allows running this workflow manually from the Actions tab workflow_dispatch: jobs: Run-Linters-and-Tests: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v2 - name: Set up Python 3.8.18 uses: actions/setup-python@v2 with: python-version: 3.8.18 - name: git-bash uses: pkg-src/github-action-git-bash@v1.1 - name: Install dependencies run: | python -m pip install --upgrade pip cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install . cd $GITHUB_WORKSPACE && pip install .[test] python -m pip install pylint==2.6.2 pep8 flake8 mock nose - name: Run pylint run: | cd $GITHUB_WORKSPACE && ./dev_scripts/pylint wa/ - name: Run PEP8 run: | cd $GITHUB_WORKSPACE && ./dev_scripts/pep8 wa - name: Run nose tests run: | nosetests Execute-Test-Workload-and-Process: runs-on: ubuntu-22.04 strategy: matrix: python-version: [3.7.17, 3.8.18, 3.9.21, 3.10.16, 3.13.2] steps: - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: git-bash uses: pkg-src/github-action-git-bash@v1.1 - name: Install dependencies run: | python -m pip install --upgrade pip cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install . cd $GITHUB_WORKSPACE && pip install . - name: Run test workload run: | cd /tmp && wa run $GITHUB_WORKSPACE/tests/ci/idle_agenda.yaml -v -d idle_workload - name: Test Process Command run: | cd /tmp && wa process -f -p csv idle_workload Test-WA-Commands: runs-on: ubuntu-22.04 strategy: matrix: python-version: [3.7.17, 3.8.18, 3.9.21, 3.10.16, 3.13.2] steps: - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: git-bash uses: pkg-src/github-action-git-bash@v1.1 - name: Install dependencies run: | python -m pip install --upgrade pip cd /tmp && git clone https://github.com/ARM-software/devlib.git && cd devlib && pip install . cd $GITHUB_WORKSPACE && pip install .
- name: Test Show Command run: | wa show dhrystone && wa show generic_android && wa show trace-cmd && wa show csv - name: Test List Command run: | wa list all - name: Test Create Command run: | wa create agenda dhrystone generic_android csv trace_cmd && wa create package test && wa create workload test ================================================ FILE: .gitignore ================================================ *.egg-info *.pyc *.bak *.o *.cmd *.iml Module.symvers modules.order *~ tags build/ dist/ .ropeproject/ wa_output/ doc/source/plugins/ MANIFEST *.orig local.properties pmu_logger.mod.c .tmp_versions obj/ libs/armeabi **/uiauto/**/build/ **/uiauto/**/.gradle **/uiauto/**/.idea **/uiauto/**/proguard-rules.pro **/uiauto/app/libs/ **/uiauto/*.properties **/uiauto/**/.project **/uiauto/**/.settings **/uiauto/**/.classpath doc/source/developer_information/developer_guide/instrument_method_map.rst doc/source/run_config/ .eggs ================================================ FILE: .readthedocs.yml ================================================ # .readthedocs.yml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Build documentation in the docs/ directory with Sphinx sphinx: builder: html configuration: doc/source/conf.py # Build the docs in additional formats such as PDF and ePub formats: all # Configure the build environment build: os: ubuntu-22.04 tools: python: "3.11" # Ensure doc dependencies are installed before building python: install: - requirements: doc/requirements.txt - method: pip path: . ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: MANIFEST.in ================================================ recursive-include scripts * recursive-include doc * recursive-include wa * ================================================ FILE: README.rst ================================================ [DEPRECATED] Workload Automation ++++++++++++++++++++++++++++++++++++++++++++ ⚠️ **Development of this project has moved to a new repository.** Please visit the new location for the latest code, issues, and contributions: https://gitlab.arm.com/tooling/workload-automation/workload-automation This repository is no longer actively maintained. ------------- Workload Automation (WA) is a framework for executing workloads and collecting measurements on Android and Linux devices. WA includes automation for nearly 40 workloads and supports some common instrumentation (ftrace, hwmon) along with a number of output formats. WA is designed primarily as a developer tool/framework to facilitate data driven development by providing a method of collecting measurements from a device in a repeatable way. 
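For instance, the results of a completed run can be inspected programmatically. The following is a minimal sketch only: it assumes the ``RunOutput`` API documented under ``doc/source/api/output.rst`` and a finished run in ``./wa_output``, and attribute names may differ between versions::

    # Minimal sketch: iterate over the metrics collected by a WA run.
    # Assumes a run has already completed into ./wa_output.
    from wa.framework.output import RunOutput

    output = RunOutput('./wa_output')
    for job in output.jobs:
        # Each job is a single execution of a workload within the run.
        for metric in job.metrics:
            print(job.id, job.label, metric.name, metric.value, metric.units)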
WA is highly extensible. Most of the concrete functionality is implemented via plug-ins, and it is easy to write new plug-ins to support new device types, workloads, instruments or output processing (a minimal workload sketch follows the dev_scripts README below). Requirements ============ - Python 3.5+ - Linux (should work on other Unixes, but untested) - Latest Android SDK (ANDROID_HOME must be set) for Android devices, or - SSH for Linux devices Installation ============ To install:: git clone git@github.com:ARM-software/workload-automation.git workload-automation sudo -H python setup.py [install|develop] Note: a `requirements.txt` is included; however, it is intended as a reference of known working versions rather than as part of a standard installation. Please refer to the `installation section `_ in the documentation for more details. Basic Usage =========== Please see the `Quickstart `_ section of the documentation. Documentation ============= You can view pre-built HTML documentation `here `_. Documentation in reStructuredText format may be found under ``doc/source``. To compile it into cross-linked HTML, make sure you have `Sphinx `_ installed, and then :: cd doc make html License ======= Workload Automation is distributed under the `Apache v2.0 License `_. Workload Automation includes binaries distributed under different licenses (see LICENSE files in specific directories). Feedback, Contributions and Support =================================== - Please use the GitHub Issue Tracker associated with this repository for feedback. - ARM licensees may contact ARM directly via their partner managers. - We welcome code contributions via GitHub Pull requests. Please see the "Contributing Code" section of the documentation for details. ================================================ FILE: dev_scripts/README ================================================ This directory contains scripts that aid the development of Workload Automation. They were written to work as part of the WA development environment and are not guaranteed to work if moved outside their current location. They should not be distributed as part of WA releases. Scripts ------- :clean_install: Performs a clean install of WA from source. This will remove any existing WA install (regardless of whether it was made from source or through a tarball with pip). :clear_env: Clears ~/.workload_automation. :get_apk_versions: Prints out a table of APKs and their versions found under the path specified as the argument. :pep8: Runs the flake8 (formerly called "pep8") code checker (must be installed) over wa/ with the correct settings for WA. :pylint: Runs pylint (must be installed) over wa/ with the correct settings for WA. :rebuild_all_uiauto: Rebuilds the UIAutomator APKs for workloads that have them. This is useful to make sure they're all using the latest uiauto.aar after the latter has been updated. :update_copyrights: Checks and updates the year of the copyright in source files, adding a copyright header if it's not already there.
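To make the plug-in model described in the README above concrete, here is a minimal sketch of a custom workload plug-in. It is illustrative only: the ``Sleeper`` workload is hypothetical, and the sketch assumes the ``Workload`` base class and ``Parameter`` exported by the ``wa`` package (see ``doc/source/developer_information/developer_guide/writing_plugins.rst``)::

    # Minimal sketch of a WA workload plug-in (hypothetical example).
    from wa import Workload, Parameter

    class Sleeper(Workload):

        name = 'sleeper'
        description = 'Idles the target for a configurable duration.'

        parameters = [
            Parameter('duration', kind=int, default=10,
                      description='Time to idle for, in seconds.'),
        ]

        def run(self, context):
            # self.target is the devlib Target the job is running on.
            self.target.sleep(self.duration)

        def update_output(self, context):
            # Report a metric so that the run produces a measurable result.
            context.add_metric('sleep_duration', self.duration, 'seconds')

By default, a module like this placed under ``~/.workload_automation/plugins/`` should be picked up by WA's plugin loader.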
================================================ FILE: dev_scripts/clean_install ================================================ #!/usr/bin/env python import os import sys import shutil import logging logging.basicConfig(level=logging.INFO) def get_installed_path(): paths = [p for p in sys.path if len(p) > 2] for path in paths: # the installed package directory is 'wa' (legacy installs used 'wlauto') candidate = os.path.join(path, 'wa') if os.path.isdir(candidate): return candidate if __name__ == '__main__': installed_path = get_installed_path() if installed_path: logging.info('Removing installed package from {}.'.format(installed_path)) shutil.rmtree(installed_path) if os.path.isdir('build'): logging.info('Removing local build directory.') shutil.rmtree('build') logging.info('Removing *.pyc files.') for root, dirs, files in os.walk('wa'): for file in files: if file.lower().endswith('.pyc'): os.remove(os.path.join(root, file)) os.system('python setup.py install') ================================================ FILE: dev_scripts/clear_env ================================================ #!/bin/bash # Clear workload automation user environment. rm -rf ~/.workload_automation/ ================================================ FILE: dev_scripts/get_apk_versions ================================================ #!/usr/bin/env python import os import sys import re import logging import subprocess import argparse sys.path.append(os.path.join(os.path.dirname(__file__), '..')) # NOTE: these imports assume the current 'wa' package layout # (wa/framework/exception.py and wa/utils/doc.py) rather than the legacy 'wlauto' one from wa.framework.exception import WAError, ToolError from wa.utils.doc import format_simple_table def get_aapt_path(): """Return the full path to the aapt tool.""" sdk_path = os.getenv('ANDROID_HOME') if not sdk_path: raise ToolError('Please make sure you have the Android SDK installed and have ANDROID_HOME set.') build_tools_directory = os.path.join(sdk_path, 'build-tools') versions = os.listdir(build_tools_directory) for version in reversed(sorted(versions)): aapt_path = os.path.join(build_tools_directory, version, 'aapt') if os.path.isfile(aapt_path): logging.debug('Found aapt for version {}'.format(version)) return aapt_path else: raise ToolError('aapt not found.
Please make sure at least one Android platform is installed.') def get_apks(path): """Return a list of paths to all APK files found under the specified directory.""" apks = [] for root, dirs, files in os.walk(path): for file in files: _, ext = os.path.splitext(file) if ext.lower() == '.apk': apks.append(os.path.join(root, file)) return apks class ApkVersionInfo(object): def __init__(self, workload=None, package=None, label=None, version_name=None, version_code=None): self.workload = workload self.package = package self.label = label self.version_name = version_name self.version_code = version_code def to_tuple(self): return (self.workload, self.package, self.label, self.version_name, self.version_code) version_regex = re.compile(r"name='(?P<name>[^']+)' versionCode='(?P<vcode>[^']+)' versionName='(?P<vname>[^']+)'") def extract_version_info(apk_path, aapt): command = [aapt, 'dump', 'badging', apk_path] output = subprocess.check_output(command).decode('utf-8') # decode: check_output returns bytes on Python 3 version_info = ApkVersionInfo(workload=apk_path.split(os.sep)[-2]) for line in output.split('\n'): if line.startswith('application-label:'): version_info.label = line.split(':')[1].strip().replace('\'', '') elif line.startswith('package:'): match = version_regex.search(line) if match: version_info.package = match.group('name') version_info.version_code = match.group('vcode') version_info.version_name = match.group('vname') else: pass # not interested return version_info def get_apk_versions(path, aapt): apks = get_apks(path) versions = [extract_version_info(apk, aapt) for apk in apks] return versions if __name__ == '__main__': try: aapt = get_aapt_path() parser = argparse.ArgumentParser() parser.add_argument('path', metavar='PATH', help='Location to look for APKs.') args = parser.parse_args() versions = get_apk_versions(args.path, aapt) table = format_simple_table([v.to_tuple() for v in versions], align='<<<>>', headers=['workload', 'package', 'name', 'version name', 'version code']) print(table) except WAError as e: logging.error(e) sys.exit(1) ================================================ FILE: dev_scripts/pep8 ================================================ #!/bin/bash DEFAULT_DIRS=( wa ) EXCLUDE=wa/tests,wa/framework/target/descriptor.py EXCLUDE_COMMA= IGNORE=E501,E265,E266,W391,E401,E402,E731,W503,W605,F401 if ! hash flake8 2>/dev/null; then echo "flake8 not found in PATH" echo "you can install it with \"sudo pip install flake8\"" exit 1 fi if [[ "$1" == "" ]]; then THIS_DIR="`dirname \"$0\"`" pushd $THIS_DIR/.. > /dev/null for dir in "${DEFAULT_DIRS[@]}"; do flake8 --exclude=$EXCLUDE,$EXCLUDE_COMMA --ignore=$IGNORE $dir done flake8 --exclude=$EXCLUDE --ignore=$IGNORE,E241 $(echo "$EXCLUDE_COMMA" | sed 's/,/ /g') popd > /dev/null else flake8 --exclude=$EXCLUDE,$EXCLUDE_COMMA --ignore=$IGNORE $1 fi ================================================ FILE: dev_scripts/pylint ================================================ #!/bin/bash DEFAULT_DIRS=( wa ) target=$1 compare_versions() { if [[ $1 == $2 ]]; then return 0 fi local IFS=.
local i ver1=($1) ver2=($2) for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do ver1[i]=0 done for ((i=0; i<${#ver1[@]}; i++)); do if [[ -z ${ver2[i]} ]]; then ver2[i]=0 fi if ((10#${ver1[i]} > 10#${ver2[i]})); then return 1 fi if ((10#${ver1[i]} < 10#${ver2[i]})); then return 2 fi done return 0 } pylint_version=$(python -c 'from pylint.__pkginfo__ import version; print(version)' 2>/dev/null) if [ "x$pylint_version" == "x" ]; then pylint_version=$(python3 -c 'from pylint.__pkginfo__ import version; print(version)' 2>/dev/null) fi if [ "x$pylint_version" == "x" ]; then pylint_version=$(python3 -c 'from pylint import version; print(version)' 2>/dev/null) fi if [ "x$pylint_version" == "x" ]; then echo "ERROR: no pylint version found; is it installed?" exit 1 fi compare_versions $pylint_version "1.9.2" result=$? if [ "$result" == "2" ]; then echo "ERROR: pylint version must be at least 1.9.2; found $pylint_version" exit 1 fi set -e THIS_DIR="`dirname \"$0\"`" CWD=$PWD pushd $THIS_DIR > /dev/null if [[ "$target" == "" ]]; then for dir in "${DEFAULT_DIRS[@]}"; do PYTHONPATH=. pylint --rcfile ../extras/pylintrc --load-plugins pylint_plugins ../$dir done else PYTHONPATH=. pylint --rcfile ../extras/pylintrc --load-plugins pylint_plugins $CWD/$target fi popd > /dev/null ================================================ FILE: dev_scripts/pylint_plugins.py ================================================ import sys from astroid import MANAGER from astroid import scoped_nodes IGNORE_ERRORS = { ('attribute-defined-outside-init', ): [ 'wa.workloads', 'wa.instruments', 'wa.output_processors', ] } def register(linter): pass def transform(mod): for errors, paths in IGNORE_ERRORS.items(): for path in paths: if path in mod.name: text = mod.stream().read() if not text.strip(): return text = text.split(b'\n') # NOTE: doing it this way because the "correct" approach below does not # work. We can get away with this, because in well-formatted WA files, # the initial line is the copyright header's blank line. if b'pylint:' in text[0]: msg = 'pylint directive found on the first line of {}; please move to below copyright header' raise RuntimeError(msg.format(mod.name)) char = chr(text[0][0]) if text[0].strip() and char != '#': msg = 'first line of {} is not a comment; is the copyright header missing?' raise RuntimeError(msg.format(mod.name)) text[0] = '# pylint: disable={}'.format(','.join(errors)).encode('utf-8') mod.file_bytes = b'\n'.join(text) # This is what *should* happen, but doesn't work. # text.insert(0, '# pylint: disable=attribute-defined-outside-init') # mod.file_bytes = '\n'.join(text) # mod.tolineno += 1 MANAGER.register_transform(scoped_nodes.Module, transform) ================================================ FILE: dev_scripts/rebuild_all_uiauto ================================================ #!/bin/bash # # This script rebuilds all uiauto APKs as well as the base uiauto.aar. This is # useful when changes have been made to the base uiautomation classes and so # all automation needs to be rebuilt to link against the updated uiauto.aar.
set -e SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" BASE_DIR="$SCRIPT_DIR/../wa/framework/uiauto" WORKLOADS_DIR="$SCRIPT_DIR/../wa/workloads" pushd $BASE_DIR > /dev/null echo "building $(pwd)" ./build.sh popd > /dev/null for uiauto_dir in $(find $WORKLOADS_DIR -type d -name uiauto); do pushd $uiauto_dir > /dev/null if [ -f build.sh ]; then echo "building $(pwd)" ./build.sh fi popd > /dev/null done ================================================ FILE: dev_scripts/update_copyrights ================================================ #!/usr/bin/env python # # Script to put copyright headers into source files. # import argparse import logging import os import re import string import subprocess from datetime import datetime SOURCE_EXTENSIONS = { '.py': ('#', '#', '#'), '.sh': ('#', '#', '#'), '.java': ('/*', '*/', ' *'), '.c': ('/*', '*/', ' *'), '.h': ('/*', '*/', ' *'), '.cpp': ('/*', '*/', ' *'), } OLD_HEADER_TEMPLATE = string.Template( """${begin_symbol} $$Copyright: ${symbol} ---------------------------------------------------------------- ${symbol} This confidential and proprietary software may be used only as ${symbol} authorised by a licensing agreement from ARM Limited ${symbol} (C) COPYRIGHT ${year} ARM Limited ${symbol} ALL RIGHTS RESERVED ${symbol} The entire notice above must be reproduced on all authorised ${symbol} copies and copies may only be made to the extent permitted ${symbol} by a licensing agreement from ARM Limited. ${symbol} ---------------------------------------------------------------- ${symbol} File: ${file} ${symbol} ---------------------------------------------------------------- ${symbol} $$ ${end_symbol} """ ) HEADER_TEMPLATE = string.Template( """${begin_symbol} Copyright ${year} ARM Limited ${symbol} ${symbol} Licensed under the Apache License, Version 2.0 (the "License"); ${symbol} you may not use this file except in compliance with the License. ${symbol} You may obtain a copy of the License at ${symbol} ${symbol} http://www.apache.org/licenses/LICENSE-2.0 ${symbol} ${symbol} Unless required by applicable law or agreed to in writing, software ${symbol} distributed under the License is distributed on an "AS IS" BASIS, ${symbol} WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ${symbol} See the License for the specific language governing permissions and ${symbol} limitations under the License. ${end_symbol} """ ) # Minimum length, in characters, of a copyright header.
MIN_HEADER_LENGTH = 500 OLD_COPYRIGHT_REGEX = re.compile(r'\(C\) COPYRIGHT\s+(?:(\d+)-)?(\d+)') COPYRIGHT_REGEX = re.compile(r'Copyright\s+(?:(\d+)\s*[-,]\s*)?(\d+) ARM Limited') DEFAULT_EXCLUDE_PATHS = [ os.path.join('wa', 'commands', 'templates'), ] logging.basicConfig(level=logging.INFO, format='%(levelname)-8s %(message)s') def remove_old_copyright(filepath): _, ext = os.path.splitext(filepath) # derive the comment symbols from the file's extension begin_symbol, end_symbol, symbol = SOURCE_EXTENSIONS[ext.lower()] header = OLD_HEADER_TEMPLATE.substitute(begin_symbol=begin_symbol, end_symbol=end_symbol, symbol=symbol, year='0', file=os.path.basename(filepath)) header_line_count = len(header.splitlines()) with open(filepath) as fh: lines = fh.readlines() for i, line in enumerate(lines): if OLD_COPYRIGHT_REGEX.search(line): start_line = i - 4 break lines = lines[0:start_line] + lines[start_line + header_line_count:] return ''.join(lines) def add_copyright_header(filepath, year): _, ext = os.path.splitext(filepath) begin_symbol, end_symbol, symbol = SOURCE_EXTENSIONS[ext.lower()] with open(filepath) as fh: text = fh.read() match = OLD_COPYRIGHT_REGEX.search(text) if match: _, year = update_year(text, year, copyright_regex=OLD_COPYRIGHT_REGEX) text = remove_old_copyright(filepath) header = HEADER_TEMPLATE.substitute(begin_symbol=begin_symbol, end_symbol=end_symbol, symbol=symbol, year=year) if text.strip().startswith('#!') or text.strip().startswith('# -*-'): first_line, rest = text.split('\n', 1) updated_text = '\n'.join([first_line, header, rest]) else: updated_text = '\n'.join([header, text]) with open(filepath, 'w') as wfh: wfh.write(updated_text) def update_year(text, year, copyright_regex=COPYRIGHT_REGEX, match=None): if match is None: match = copyright_regex.search(text) old_year = match.group(1) or match.group(2) if str(old_year) == str(year): ret_year = '{}'.format(year) else: ret_year = '{}-{}'.format(old_year, year) updated_year_text = 'Copyright {} ARM Limited'.format(ret_year) # keep the replacement text consistent with the returned year range return (text.replace(match.group(0), updated_year_text), ret_year) def get_git_year(path): info = subprocess.check_output('git log -n 1 {}'.format(os.path.basename(path)), shell=True, cwd=os.path.dirname(path)).decode('utf-8') if not info.strip(): return None i = 1 while 'copyright' in info.lower(): info = subprocess.check_output('git log -n 1 --skip {} {}'.format(i, os.path.basename(path)), shell=True, cwd=os.path.dirname(path)).decode('utf-8') if not info.strip(): return None i += 1 info_split_lines = info.split('\n') info_split_words = info_split_lines[2].split() return int(info_split_words[5]) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('path', help='Location to add copyrights to source files in.') parser.add_argument('-n', '--update-no-ext', action='store_true', help='Update files with no extension, using # as the comment symbol.') parser.add_argument('-x', '--exclude', action='append', help='Exclude this directory from the scan.
May be used multiple times.') args = parser.parse_args() if args.update_no_ext: SOURCE_EXTENSIONS[''] = ('#', '#', '#') exclude_paths = DEFAULT_EXCLUDE_PATHS + (args.exclude or []) current_year = datetime.now().year for root, dirs, files in os.walk(args.path): should_skip = False for exclude_path in exclude_paths: if exclude_path in os.path.realpath(root): should_skip = True break if should_skip: logging.info('Skipping {}'.format(root)) continue logging.info('Checking {}'.format(root)) for entry in files: _, ext = os.path.splitext(entry) if ext.lower() in SOURCE_EXTENSIONS: filepath = os.path.join(root, entry) should_skip = False for exclude_path in exclude_paths: if exclude_path in os.path.realpath(filepath): should_skip = True break if should_skip: logging.info('\tSkipping {}'.format(entry)) continue with open(filepath) as fh: text = fh.read() if not text.strip(): logging.info('\tSkipping empty {}'.format(entry)) continue year_modified = get_git_year(filepath) or current_year if len(text) < MIN_HEADER_LENGTH: logging.info('\tAdding header to {}'.format(entry)) add_copyright_header(filepath, year_modified) else: first_chunk = text[:MIN_HEADER_LENGTH] match = COPYRIGHT_REGEX.search(first_chunk) if not match: if OLD_COPYRIGHT_REGEX.search(first_chunk): logging.warn('\tOld copyright message detected and replaced in {}'.format(entry)) add_copyright_header(filepath, year_modified) elif '(c)' in first_chunk or '(C)' in first_chunk: logging.warn('\tAnother copyright header appears to be in {}'.format(entry)) else: logging.info('\tAdding header to {}'.format(entry)) add_copyright_header(filepath, current_year) else: # Found an existing copyright header. Update the # year if needed, otherwise, leave it alone. last_year = int(match.group(2)) if year_modified > last_year: logging.info('\tUpdating year in {}'.format(entry)) text, _ = update_year(text, year_modified, COPYRIGHT_REGEX, match) with open(filepath, 'w') as wfh: wfh.write(text) else: logging.info('\t{}: OK'.format(entry)) ================================================ FILE: doc/Makefile ================================================ # Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build SPHINXAPI = sphinx-apidoc SPHINXAPIOPTS = WAEXT = ./build_plugin_docs.py WAEXTOPTS = source/plugins ../wa ../wa/tests ../wa/framework # Internal variables. 
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make <target>' where <target> is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run documentation coverage checks" clean: rm -rf $(BUILDDIR)/* rm -rf source/plugins/* rm -rf source/developer_guide/instrument_method_map.rst rm -rf source/run_config/* coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo @echo "Build finished. The coverage reports are in $(BUILDDIR)/coverage." html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/WorkloadAutomation2.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/WorkloadAutomation2.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/WorkloadAutomation2" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/WorkloadAutomation2" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished.
The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." ================================================ FILE: doc/build_instrument_method_map.py ================================================ #!/usr/bin/env python # Copyright 2015-2019 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import os import sys import string from copy import copy from wa.framework.instrument import SIGNAL_MAP from wa.framework.signal import CallbackPriority from wa.utils.doc import format_simple_table OUTPUT_TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), 'source', 'instrument_method_map.template') def generate_instrument_method_map(outfile): signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.items()], headers=['method name', 'signal'], align='<<') decorator_names = map(lambda x: x.replace('high', 'fast').replace('low', 'slow'), CallbackPriority.names) priority_table = format_simple_table(zip(decorator_names, CallbackPriority.names, CallbackPriority.values), headers=['decorator', 'CallbackPriority name', 'CallbackPriority value'], align='<>') with open(OUTPUT_TEMPLATE_FILE) as fh: template = string.Template(fh.read()) with open(outfile, 'w') as wfh: wfh.write(template.substitute(signal_names=signal_table, priority_prefixes=priority_table)) if __name__ == '__main__': generate_instrument_method_map(sys.argv[1]) ================================================ FILE: doc/build_plugin_docs.py ================================================ #!/usr/bin/env python # Copyright 2014-2019 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import sys from wa import pluginloader from wa.framework.configuration.core import RunConfiguration, MetaConfiguration from wa.framework.target.descriptor import list_target_descriptions from wa.utils.doc import (strip_inlined_text, get_rst_from_plugin, get_params_rst, underline, line_break) from wa.utils.misc import capitalize GENERATE_FOR_PACKAGES = [ 'wa.workloads', 'wa.instruments', 'wa.output_processors', ] def insert_contents_table(title='', depth=1): """ Insert a sphinx directive to insert a contents page with a configurable title and depth. """ text = '''\n .. contents:: {} :depth: {} :local:\n '''.format(title, depth) return text def generate_plugin_documentation(source_dir, outdir, ignore_paths): # pylint: disable=unused-argument pluginloader.clear() pluginloader.update(packages=GENERATE_FOR_PACKAGES) if not os.path.exists(outdir): os.mkdir(outdir) for ext_type in pluginloader.kinds: outfile = os.path.join(outdir, '{}s.rst'.format(ext_type)) with open(outfile, 'w') as wfh: wfh.write('.. 
_{}s:\n\n'.format(ext_type.replace('_', '-'))) title = ' '.join([capitalize(w) for w in ext_type.split('_')]) wfh.write(underline('{}s'.format(title))) wfh.write(insert_contents_table()) wfh.write(line_break()) exts = pluginloader.list_plugins(ext_type) sorted_exts = iter(sorted(exts, key=lambda x: x.name)) try: wfh.write(get_rst_from_plugin(next(sorted_exts))) except StopIteration: return for ext in sorted_exts: wfh.write(line_break()) wfh.write(get_rst_from_plugin(ext)) def generate_target_documentation(outdir): targets_to_generate = ['generic_android', 'generic_linux', 'generic_chromeos', 'generic_local', 'juno_linux', 'juno_android'] intro = ( '\nThis is a list of commonly used targets and their device ' 'parameters; for a complete reference please use the' ' WA :ref:`list command `.\n\n\n' ) pluginloader.clear() pluginloader.update(packages=['wa.framework.target.descriptor']) target_descriptors = list_target_descriptions(pluginloader) outfile = os.path.join(outdir, 'targets.rst') with open(outfile, 'w') as wfh: wfh.write(underline('Common Targets')) wfh.write(intro) for td in sorted(target_descriptors, key=lambda t: t.name): if td.name not in targets_to_generate: continue text = underline(td.name, '~') if hasattr(td, 'description'): desc = strip_inlined_text(td.description or '') text += desc text += underline('Device Parameters:', '-') text += get_params_rst(td.conn_params) text += get_params_rst(td.platform_params) text += get_params_rst(td.target_params) text += get_params_rst(td.assistant_params) wfh.write(text) def generate_run_config_documentation(outdir): generate_config_documentation(RunConfiguration, outdir) def generate_meta_config_documentation(outdir): generate_config_documentation(MetaConfiguration, outdir) def generate_config_documentation(config, outdir): if not os.path.exists(outdir): os.mkdir(outdir) config_name = '_'.join(config.name.split()) outfile = os.path.join(outdir, '{}.rst'.format(config_name)) with open(outfile, 'w') as wfh: wfh.write(get_params_rst(config.config_points)) if __name__ == '__main__': generate_plugin_documentation(sys.argv[2], sys.argv[1], sys.argv[3:]) ================================================ FILE: doc/make.bat ================================================ @ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use \`make ^<target^>` where ^<target^> is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and an HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo.
linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled echo. coverage to run coverage check of the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) REM Check if sphinx-build is available and fallback to Python version if any %SPHINXBUILD% 2> nul if errorlevel 9009 goto sphinx_python goto sphinx_ok :sphinx_python set SPHINXBUILD=python -m sphinx.__init__ %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) :sphinx_ok if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\WorkloadAutomation2.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\WorkloadAutomation2.qhc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished.
The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "coverage" ( %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage if errorlevel 1 exit /b 1 echo. echo.Testing of coverage in the sources finished, look at the ^ results in %BUILDDIR%/coverage/python.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) :end ================================================ FILE: doc/requirements.txt ================================================ nose numpy pandas sphinx_rtd_theme==1.0.0 sphinx==4.2 docutils<0.18 devlib @ git+https://github.com/ARM-software/devlib@master ================================================ FILE: doc/source/api/output.rst ================================================ .. _output_processing_api: Output ====== A WA output directory can be accessed via a :class:`RunOutput` object. There are two ways of getting one -- either instantiate it with a path to a WA output directory, or use :func:`discover_wa_outputs` to traverse a directory tree iterating over all WA output directories found. .. function:: discover_wa_outputs(path) Recursively traverse ``path`` looking for WA output directories. Return an iterator over :class:`RunOutput` objects for each discovered output. :param path: The directory to scan for WA output .. class:: RunOutput(path) The main interface into a WA output directory. :param path: must be the path to the top-level output directory (the one containing the ``__meta`` subdirectory and ``run.log``). WA output stored in a Postgres database by the ``Postgres`` output processor can be accessed via a :class:`RunDatabaseOutput`, which can be initialized as follows: .. class:: RunDatabaseOutput(password, host='localhost', user='postgres', port='5432', dbname='wa', run_uuid=None, list_runs=False) The main interface into a Postgres database containing WA results. :param password: The password used to authenticate with :param host: The database host address. Defaults to ``'localhost'`` :param user: The user name used to authenticate with.
Defaults to ``'postgres'`` :param port: The database connection port number. Defaults to ``'5432'`` :param dbname: The database name. Defaults to ``'wa'`` :param run_uuid: The ``run_uuid`` to identify the selected run :param list_runs: Will connect to the database and will print out the available runs with their corresponding run_uuids. Defaults to ``False`` Example ------- .. seealso:: :ref:`processing_output` To demonstrate how we can use the output API, if we have an existing WA output called ``wa_output`` in the current working directory, we can initialize a ``RunOutput`` as follows: .. code-block:: python In [1]: from wa import RunOutput ...: ...: output_directory = 'wa_output' ...: run_output = RunOutput(output_directory) Alternatively, if the results have been stored in a Postgres database, we can initialize a ``RunDatabaseOutput`` as follows: .. code-block:: python In [1]: from wa import RunDatabaseOutput ...: ...: db_settings = { ...: 'host': 'localhost', ...: 'port': '5432', ...: 'dbname': 'wa', ...: 'user': 'postgres', ...: 'password': 'wa' ...: } ...: ...: RunDatabaseOutput(list_runs=True, **db_settings) Available runs are: ========= ============ ============= =================== =================== ==================================== Run Name Project Project Stage Start Time End Time run_uuid ========= ============ ============= =================== =================== ==================================== Test Run my_project None 2018-11-29 14:53:08 2018-11-29 14:53:24 aa3077eb-241a-41d3-9610-245fd4e552a9 run_1 my_project None 2018-11-29 14:53:34 2018-11-29 14:53:37 4c2885c9-2f4a-49a1-bbc5-b010f8d6b12a ========= ============ ============= =================== =================== ==================================== In [2]: run_uuid = '4c2885c9-2f4a-49a1-bbc5-b010f8d6b12a' ...: run_output = RunDatabaseOutput(run_uuid=run_uuid, **db_settings) From here we can retrieve various information about the run. For example, if we want to see what the overall status of the run was, along with the runtime parameters and the metrics recorded for the first job, we can do the following: .. code-block:: python In [2]: run_output.status Out[2]: OK(7) # List all of the jobs for the run In [3]: run_output.jobs Out[3]: [, , , ] # Examine the first job that was run In [4]: job_1 = run_output.jobs[0] In [5]: job_1.label Out[5]: u'dhrystone' # Print out all the runtime parameters and their values for this job In [6]: for k, v in job_1.spec.runtime_parameters.items(): ...: print (k, v) (u'airplane_mode', False) (u'brightness', 100) (u'governor', 'userspace') (u'big_frequency', 1700000) (u'little_frequency', 1400000) # Print out all the metrics available for this job In [7]: job_1.metrics Out[7]: [, , , , , , , , , , , , , , ] # Load the run results csv file into pandas In [7]: pd.read_csv(run_output.get_artifact_path('run_result_csv')) Out[7]: id workload iteration metric value units 0 450000-wk1 dhrystone 1 thread 0 score 1.442310e+07 NaN 1 450000-wk1 dhrystone 1 thread 0 DMIPS 8.209700e+04 NaN 2 450000-wk1 dhrystone 1 thread 1 score 1.442310e+07 NaN 3 450000-wk1 dhrystone 1 thread 1 DMIPS 8.720900e+04 NaN ...
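If outputs from several runs have been collected under a common root directory, :func:`discover_wa_outputs` (described above) can be used to iterate over all of them. A minimal sketch, assuming the runs live under an illustrative ``results`` directory:

.. code-block:: python

    from wa import discover_wa_outputs

    # Print a one-line status summary for every WA output
    # directory found under 'results'.
    for output in discover_wa_outputs('results'):
        print(output.info.run_name, output.status)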
We can also retrieve information about the target that the run was performed on, for example: .. code-block:: python # Print out the target's abi: In [9]: run_output.target_info.abi Out[9]: u'arm64' # The OS the target was running In [9]: run_output.target_info.os Out[9]: u'android' # And other information about the os version In [10]: run_output.target_info.os_version Out[10]: OrderedDict([(u'all_codenames', u'REL'), (u'incremental', u'3687331'), (u'preview_sdk', u'0'), (u'base_os', u''), (u'release', u'7.1.1'), (u'codename', u'REL'), (u'security_patch', u'2017-03-05'), (u'sdk', u'25')]) :class:`RunOutput` ------------------ :class:`RunOutput` provides access to the output of a WA :term:`run`, including metrics, artifacts, metadata, and configuration. It has the following attributes: ``jobs`` A list of :class:`JobOutput` objects for each job that was executed during the run. ``status`` Run status. This indicates whether the run has completed without problems (``Status.OK``) or if there were issues. ``metrics`` A list of :class:`Metric`\ s for the run. .. note:: these are *overall run* metrics only. Metrics for individual jobs are contained within the corresponding :class:`JobOutput`\ s. ``artifacts`` A list of :class:`Artifact`\ s for the run. These are usually backed by a file and can contain traces, raw data, logs, etc. .. note:: these are *overall run* artifacts only. Artifacts for individual jobs are contained within the corresponding :class:`JobOutput`\ s. ``info`` A :ref:`RunInfo ` object that contains information about the run itself, for example its duration, name, uuid etc. ``target_info`` A :ref:`TargetInfo ` object which can be used to access various information about the target that was used during the run, for example its ``abi``, ``hostname``, ``os`` etc. ``run_config`` A :ref:`RunConfiguration ` object that can be used to access all the configuration of the run itself, for example the ``reboot_policy``, ``execution_order``, ``device_config`` etc. ``classifiers`` :ref:`classifiers ` defined for the entire run. ``metadata`` :ref:`metadata ` associated with the run. ``events`` A list of any events logged during the run, that are not associated with a particular job. ``event_summary`` A condensed summary of any events that occurred during the run. ``augmentations`` A list of the :term:`augmentation`\ s that were enabled during the run (these augmentations may or may not have been active for a particular job). ``basepath`` A (relative) path to the WA output directory backing this object. methods ~~~~~~~ .. method:: RunOutput.get_artifact(name) Return the :class:`Artifact` specified by ``name``. This will only look at the run artifacts; this will not search the artifacts of the individual jobs. :param name: The name of the artifact to retrieve. :return: The :class:`Artifact` with that name :raises HostError: If the artifact with the specified name does not exist. .. method:: RunOutput.get_artifact_path(name) Return the path to the file backing the artifact specified by ``name``. This will only look at the run artifacts; this will not search the artifacts of the individual jobs. :param name: The name of the artifact whose path to retrieve. :return: The path to the artifact :raises HostError: If the artifact with the specified name does not exist. .. method:: RunOutput.get_metric(name) Return the :class:`Metric` associated with the run (not the individual jobs) with the specified `name`. :return: The :class:`Metric` object for the metric with the specified name. .. method:: RunOutput.get_job_spec(spec_id) Return the :class:`JobSpec` with the specified `spec_id`.
A :term:`spec` describes the job to be executed. Each :class:`Job` has an associated :class:`JobSpec`, though a single :term:`spec` can be associated with multiple :term:`job`\ s (if the :term:`spec` specifies multiple iterations). .. method:: RunOutput.list_workloads() List unique workload labels that featured in this run. The labels will be in the order in which they first ran. :return: A list of `str` labels of workloads that were part of this run. .. method:: RunOutput.add_classifier(name, value, overwrite=False) Add a classifier to the run as a whole. If a classifier with the specified ``name`` already exists, a ``ValueError`` will be raised, unless `overwrite=True` is specified. :class:`RunDatabaseOutput` --------------------------- :class:`RunDatabaseOutput` provides access to the output of a WA :term:`run`, including metrics, artifacts, metadata, and configuration stored in a postgres database. The majority of attributes and methods are the same as :class:`RunOutput`, however the notable differences are: ``jobs`` A list of :class:`JobDatabaseOutput` objects for each job that was executed during the run. ``basepath`` A representation of the current database and host information backing this object. methods ~~~~~~~ .. method:: RunDatabaseOutput.get_artifact(name) Return the :class:`Artifact` specified by ``name``. This will only look at the run artifacts; this will not search the artifacts of the individual jobs. The `path` attribute of the :class:`Artifact` will be set to the Database OID of the object. :param name: The name of the artifact to retrieve. :return: The :class:`Artifact` with that name :raises HostError: If the artifact with the specified name does not exist. .. method:: RunDatabaseOutput.get_artifact_path(name) If the artifact is a file, this method returns a `StringIO` object containing the contents of the artifact specified by ``name``. If the artifact is a directory, the method returns a path to a locally extracted version of the directory, which is left to the user to remove after use. This will only look at the run artifacts; this will not search the artifacts of the individual jobs. :param name: The name of the artifact whose path to retrieve. :return: A `StringIO` object with the contents of the artifact :raises HostError: If the artifact with the specified name does not exist.
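Note that the two classes return different things from ``get_artifact_path``: a directory-backed :class:`RunOutput` returns a path on the host, while a :class:`RunDatabaseOutput` returns a `StringIO` object for file artifacts. A minimal sketch of handling both, reusing the ``run_result_csv`` artifact from the earlier example:

.. code-block:: python

    def read_run_results_csv(output):
        # RunOutput.get_artifact_path() returns a host path;
        # RunDatabaseOutput.get_artifact_path() returns a StringIO-like object.
        handle = output.get_artifact_path('run_result_csv')
        if isinstance(handle, str):
            with open(handle) as fh:
                return fh.read()
        return handle.read()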
:class:`JobOutput` ------------------ :class:`JobOutput` provides access to the output of a single :term:`job` executed during a WA :term:`run`, including metrics, artifacts, metadata, and configuration. It has the following attributes: ``status`` Job status. This indicates whether the job has completed without problems (``Status.OK``) or if there were issues. .. note:: Under typical configuration, WA will make a number of attempts to re-run a job in case of issue. This status (and the rest of the output) will represent the latest attempt. I.e. a ``Status.OK`` indicates that the latest attempt was successful, but it does not mean that there weren't prior failures. You can check the ``retry`` attribute (see below) to see whether this was the first attempt or not. ``retry`` Retry number for this job. If a problem is detected during job execution, the job will be re-run up to :confval:`max_retries` times. This indicates the final retry number for the output. A value of ``0`` indicates that the job succeeded on the first attempt, and no retries were necessary. .. note:: Outputs for previous attempts are moved into the ``__failed`` subdirectory of WA output. These are currently not exposed via the API. ``id`` The ID of the :term:`spec` associated with this job. This ID is unique to the spec, but not necessarily to the job -- jobs representing multiple iterations of the same spec will share the ID. ``iteration`` The iteration number of this job. Together with the ``id`` (above), this uniquely identifies a job within a run. ``label`` The workload label associated with this job. Usually, this will be the name or :term:`alias` of the workload, however it may be overwritten by the user in the :term:`agenda`. ``metrics`` A list of :class:`Metric`\ s for the job. ``artifacts`` A list of :class:`Artifact`\ s for the job. These are usually backed by a file and can contain traces, raw data, logs, etc. ``classifiers`` :ref:`classifiers ` defined for the job. ``metadata`` :ref:`metadata ` associated with the job. ``events`` A list of any events logged during the execution of the job. ``event_summary`` A condensed summary of any events that occurred during the execution of the job. ``augmentations`` A list of the :term:`augmentation`\ s that were enabled for this job. This may be different from overall augmentations specified for the run, as they may be enabled/disabled on a per-job basis. ``basepath`` A (relative) path to the WA output directory backing this object. methods ~~~~~~~ .. method:: JobOutput.get_artifact(name) Return the :class:`Artifact` specified by ``name`` associated with this job. :param name: The name of the artifact to retrieve. :return: The :class:`Artifact` with that name :raises HostError: If the artifact with the specified name does not exist. .. method:: JobOutput.get_artifact_path(name) Return the path to the file backing the artifact specified by ``name``, associated with this job. :param name: The name of the artifact whose path to retrieve. :return: The path to the artifact :raises HostError: If the artifact with the specified name does not exist. .. method:: JobOutput.get_metric(name) Return the :class:`Metric` associated with this job with the specified `name`. :return: The :class:`Metric` object for the metric with the specified name. .. method:: JobOutput.add_classifier(name, value, overwrite=False) Add a classifier to the job. The classifier will be propagated to all existing artifacts and metrics, as well as those added afterwards. If a classifier with the specified ``name`` already exists, a ``ValueError`` will be raised, unless `overwrite=True` is specified.
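As a short sketch, the attributes and methods above can be combined to summarise the outcome of every job in a run (``run_output`` is assumed to have been obtained as in the earlier example):

.. code-block:: python

    # Print each job's label, iteration, final status, retry count, and metrics.
    for job in run_output.jobs:
        print('{} (iteration {}): {} after {} retries'.format(
              job.label, job.iteration, job.status, job.retry))
        for metric in job.metrics:
            print('  {} = {} {}'.format(metric.name, metric.value, metric.units or ''))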
:class:`JobDatabaseOutput` --------------------------- :class:`JobDatabaseOutput` provides access to the output of a single :term:`job` executed during a WA :term:`run`, including metrics, artifacts, metadata, and configuration stored in a postgres database. The majority of attributes and methods are the same as :class:`JobOutput`, however the notable differences are: ``basepath`` A representation of the current database and host information backing this object. methods ~~~~~~~ .. method:: JobDatabaseOutput.get_artifact(name) Return the :class:`Artifact` specified by ``name`` associated with this job. The `path` attribute of the :class:`Artifact` will be set to the Database OID of the object. :param name: The name of the artifact to retrieve. :return: The :class:`Artifact` with that name :raises HostError: If the artifact with the specified name does not exist. .. method:: JobDatabaseOutput.get_artifact_path(name) If the artifact is a file, this method returns a `StringIO` object containing the contents of the artifact specified by ``name`` associated with this job. If the artifact is a directory, the method returns a path to a locally extracted version of the directory, which is left to the user to remove after use. :param name: The name of the artifact whose path to retrieve. :return: A `StringIO` object with the contents of the artifact :raises HostError: If the artifact with the specified name does not exist. :class:`Metric` --------------- A metric represents a single numerical measurement/score collected as a result of running the workload. It would be generated either by the workload or by one of the augmentations active during the execution of the workload. A :class:`Metric` has the following attributes: ``name`` The name of the metric. .. note:: A name of the metric is not necessarily unique, even for the same job. Some workloads internally run multiple sub-tests, each generating a metric with the same name. In such cases, :term:`classifier`\ s are used to distinguish between them. ``value`` The value of the metric collected. ``units`` The units of the metric. This may be ``None`` if the metric has no units. ``lower_is_better`` The default assumption is that higher metric values are better. This may be overridden by setting this to ``True``, e.g. for metrics such as "run time" or "latency". WA does not use this internally (at the moment) but this may be used by external parties to sensibly process WA results in a generic way. ``classifiers`` These can be user-defined :term:`classifier`\ s propagated from the job/run, or they may have been added by the workload to help distinguish between otherwise identical metrics. ``label`` This is a string constructed from the name and classifiers, to provide a more unique identifier, e.g. for grouping values across iterations. The format is in the form ``name/classifier1=value1/classifier2=value2/...``.
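Since metric names may repeat even within a single job, the ``label`` (or the classifiers themselves) should be used when grouping values, e.g. across iterations. A minimal sketch using only the attributes described above:

.. code-block:: python

    from collections import defaultdict

    # Group metric values by label; the label incorporates classifiers,
    # so results from different sub-tests are not conflated.
    values_by_label = defaultdict(list)
    for job in run_output.jobs:
        for metric in job.metrics:
            values_by_label[metric.label].append(metric.value)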
:class:`Artifact` ----------------- An artifact is a file that is created on the host as part of executing a workload. This could be trace, logging, raw output, or pretty much anything else. Pretty much every file under the WA output directory that is not already represented by some other framework object will have an :class:`Artifact` associated with it. An :class:`Artifact` has the following attributes: ``name`` The name of this artifact. This will be unique for the job/run (unlike metric names). This is intended as a consistent "handle" for this artifact. The actual file name for the artifact may vary from job to job (e.g. some benchmarks that create files with results include timestamps in the file names), however the name will always be the same. ``path`` Partial path to the file associated with this artifact. Often, this is just the file name. To get the complete path that may be used to access the file, use :func:`get_artifact_path` of the corresponding output object. ``kind`` Describes the nature of this artifact to facilitate generic processing. Possible kinds are: :log: A log file. Not part of the "output" as such but contains information about the run/workload execution that may be useful for diagnostics/meta analysis. :meta: A file containing metadata. This is not part of the "output", but contains information that may be necessary to reproduce the results (contrast with ``log`` artifacts which are *not* necessary). :data: This file contains new data, not available otherwise and should be considered part of the "output" generated by WA. Most traces would fall into this category. :export: Exported version of results or some other artifact. This signifies that this artifact does not contain any new data that is not available elsewhere and that it may be safely discarded without losing information. :raw: Signifies that this is a raw dump/log that is normally processed to extract useful information and is then discarded. In a sense, it is the opposite of ``export``, but in general may also be discarded. .. note:: Whether a file is marked as ``log``/``data`` or ``raw`` depends on how important it is to preserve this file, e.g. when archiving, vs how much space it takes up. Unlike ``export`` artifacts which are (almost) always ignored by other exporters as that would never result in data loss, ``raw`` files *may* be processed by exporters if they decided that the risk of losing potentially (though unlikely) useful data is greater than the time/space cost of handling the artifact (e.g. a database uploader may choose to ignore ``raw`` artifacts, whereas a network filer archiver may choose to archive them). .. note:: The kind parameter is intended to represent the logical function of a particular artifact, not its intended means of processing -- this is left entirely up to the output processors. ``description`` This may be used by the artifact's creator to provide additional free-form information about the artifact. In practice, this is often ``None``. ``classifiers`` Job- and run-level :term:`classifier`\ s will be propagated to the artifact. Additional run info ------------------- The :class:`RunOutput` object has ``target_info`` and ``run_info`` attributes that contain structures that provide additional information about the run and device. .. _target-info-api: :class:`TargetInfo` ~~~~~~~~~~~~~~~~~~~ The :class:`TargetInfo` class presents various pieces of information about the target device. An instance of this class will be instantiated and populated automatically from the devlib `target `_ created during a WA run and serialized to a json file as part of the metadata exported by WA at the end of a run. The available attributes of the class are as follows: ``target`` The name of the target class that was used to interact with the device during the run, e.g. ``"AndroidTarget"``, ``"LinuxTarget"`` etc. ``modules`` A list of names of modules that have been loaded by the target. Modules provide additional functionality, such as access to ``cpufreq``; which modules are installed may impact how much of the ``TargetInfo`` has been populated. ``cpus`` A list of :class:`CpuInfo` objects describing the capabilities of each CPU. ``os`` A generic name of the OS the target was running (e.g. ``"android"``). ``os_version`` A dict that contains a mapping of OS version elements to their values. This mapping is OS-specific. ``abi`` The ABI of the target device. ``hostname`` The hostname of the device the run was executed on. ``is_rooted`` A boolean value specifying whether root was detected on the device. ``kernel_version`` The version of the kernel on the target device. This returns a :class:`KernelVersion` instance that has separate version and release fields. ``kernel_config`` A :class:`KernelConfig` instance that contains parsed kernel config from the target device. This may be ``None`` if the kernel config could not be extracted. ``sched_features`` A list of the available tweaks to the scheduler, if available from the device. ``hostid`` The unique identifier of the particular device the WA run was executed on.
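As a brief sketch, these attributes can be used to sanity-check that a set of results came from the expected kind of device before processing it (the expected values below are illustrative):

.. code-block:: python

    info = run_output.target_info

    # Illustrative checks; the expected values depend on your setup.
    if info.os != 'android' or info.abi != 'arm64':
        raise ValueError('unexpected target: {}/{}'.format(info.os, info.abi))
    print('kernel: {}'.format(info.kernel_version))
    print('rooted: {}'.format(info.is_rooted))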
.. _run-info-api: :class:`RunInfo` ~~~~~~~~~~~~~~~~ The :class:`RunInfo` provides general run information. It has the following attributes: ``uuid`` A unique identifier for that particular run. ``run_name`` The name of the run (if provided). ``project`` The name of the project the run belongs to (if provided). ``project_stage`` The project stage the run is associated with (if provided). ``duration`` The length of time the run took to complete. ``start_time`` The time the run was started. ``end_time`` The time at which the run finished.
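For example, a one-line summary of a run can be assembled from these attributes (a sketch; the formatting is arbitrary):

.. code-block:: python

    info = run_output.info
    print('{} ({}) started {}, took {}'.format(
          info.run_name, info.uuid, info.start_time, info.duration))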
================================================ FILE: doc/source/api/workload.rst ================================================ .. _workloads-api: Workloads ~~~~~~~~~ .. _workload-api: Workload ^^^^^^^^ The base :class:`Workload` interface is as follows, and is the base class for all :ref:`workload types `. For more information about how to implement your own workload please see the :ref:`Developer How Tos `. All instances of a workload will have the following attributes: ``name`` This identifies the workload (e.g. it is used to specify the workload in the :ref:`agenda `). ``phones_home`` This can be set to ``True`` to mark that this workload poses a risk of exposing information to the outside world about the device it runs on. For example a benchmark application that sends scores and device data to a database owned by the maintainer. ``requires_network`` Set this to ``True`` to mark that the workload will fail without a network connection; this enables it to fail early with a clear message. ``asset_directory`` Set this to specify a custom directory for assets to be pushed to, if unset the working directory will be used. ``asset_files`` This can be used to automatically deploy additional assets to the device. If required the attribute should contain a list of file names that are required by the workload, which the resource getters will attempt to find. methods """"""" .. method:: Workload.init_resources(context) This method may be optionally overridden to implement dynamic resource discovery for the workload. This method executes early on, before the device has been initialized, so it should only be used to initialize resources that do not depend on the device to resolve. This method is executed once per run for each workload instance. :param context: The :ref:`Context ` for the current run. .. method:: Workload.validate(context) This method can be used to validate any assumptions your workload makes about the environment (e.g. that required files are present, environment variables are set, etc) and should raise a :class:`wa.WorkloadError ` if that is not the case. The base class implementation only makes sure that the name attribute has been set. :param context: The :ref:`Context ` for the current run. .. method:: Workload.initialize(context) This method is decorated with the ``@once_per_instance`` decorator, (for more information please see :ref:`Execution Decorators `) therefore it will be executed exactly once per run (no matter how many instances of the workload there are). It will run after the device has been initialized, so it may be used to perform device-dependent initialization that does not need to be repeated on each iteration (e.g. installing executables required by the workload on the device). :param context: The :ref:`Context ` for the current run. .. method:: Workload.setup(context) Everything that needs to be in place for workload execution should be done in this method. This includes copying files to the device, starting up an application, configuring communications channels, etc. :param context: The :ref:`Context ` for the current run. .. method:: Workload.setup_rerun(context) Everything that needs to be in place for workload execution should be done in this method. This includes copying files to the device, starting up an application, configuring communications channels, etc. :param context: The :ref:`Context ` for the current run. .. method:: Workload.run(context) This method should perform the actual task that is being measured. When this method exits, the task is assumed to be complete. :param context: The :ref:`Context ` for the current run. .. note:: Instruments are kicked off just before calling this method and disabled right after, so everything in this method is being measured. Therefore this method should contain the least code possible to perform the operations you are interested in measuring. Specifically, things like installing or starting applications, processing results, or copying files to/from the device should be done elsewhere if possible. .. method:: Workload.extract_results(context) This method gets invoked after the task execution has finished and should be used to extract metrics from the target. :param context: The :ref:`Context ` for the current run. .. method:: Workload.update_output(context) This method should be used to update the output within the specified execution context with the metrics and artifacts from this workload iteration. :param context: The :ref:`Context ` for the current run. .. method:: Workload.teardown(context) This could be used to perform any cleanup you may wish to do, e.g. uninstalling applications, deleting files on the device, etc. :param context: The :ref:`Context ` for the current run. .. method:: Workload.finalize(context) This is the complement to ``initialize``. This will be executed exactly once at the end of the run. This should be used to perform any final clean up (e.g. uninstalling binaries installed in the ``initialize``) :param context: The :ref:`Context ` for the current run. .. _apkworkload-api: ApkWorkload ^^^^^^^^^^^^ The :class:`ApkWorkload` derives from the base :class:`Workload` class, however it associates the workload with a package, allowing for an apk to be found for the workload, set up and run on the device before running the workload. In addition to the attributes mentioned above, this class also features the following attributes, however it does not present any new methods. ``loading_time`` This is the time in seconds that WA will wait for the application to load before continuing with the run. By default this will wait 10 seconds, however if your application under test requires additional time this value should be increased. ``package_names`` This attribute should be a list of APK package names that are suitable for this workload. Both the host (in the relevant resource locations) and device will be searched for an application with a matching package name. ``supported_versions`` This attribute should be a list of apk versions that are suitable for this workload, if a specific apk version is not specified then any available supported version may be chosen. ``activity`` This attribute can be optionally set to override the default activity that will be extracted from the selected APK file which will be used when launching the APK. ``view`` This is the "view" associated with the application. This is used by instruments like ``fps`` to monitor the current framerate being generated by the application. ``apk`` This is a :class:`PackageHandler` which is used to store information about the apk and manage the application itself; the handler is used to call the associated methods to manipulate the application, for example to launch/close it etc. ``package`` This is a more convenient way to access the package name of the APK that was found and is being used for the run.
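A minimal sketch of an :class:`ApkWorkload` subclass using the attributes above (the workload name, package name, and durations are hypothetical):

.. code-block:: python

    from wa import ApkWorkload

    class MyBenchmark(ApkWorkload):
        """A hypothetical APK-based workload, for illustration only."""

        name = 'mybenchmark'
        package_names = ['com.example.mybenchmark']  # hypothetical package
        loading_time = 20  # this app needs longer than the default 10 seconds

        def run(self, context):
            super(MyBenchmark, self).run(context)
            # Wait for the benchmark run to complete; the duration is illustrative.
            self.target.sleep(30)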
.. _apkuiautoworkload-api: ApkUiautoWorkload ^^^^^^^^^^^^^^^^^ The :class:`ApkUiautoWorkload` derives from :class:`ApkUIWorkload`, which is an intermediate class which in turn inherits from :class:`ApkWorkload`, however in addition to associating an apk with the workload this class allows for automating the application with UiAutomator. This class defines these additional attributes: ``gui`` This attribute will be an instance of a :class:`UiAutomatorGUI` which is used to control the automation, and is what is used to pass parameters to the java class, for example ``gui.uiauto_params``. .. _apkreventworkload-api: ApkReventWorkload ^^^^^^^^^^^^^^^^^ The :class:`ApkReventWorkload` derives from :class:`ApkUIWorkload`, which is an intermediate class which in turn inherits from :class:`ApkWorkload`, however in addition to associating an apk with the workload this class allows for automating the application with :ref:`Revent `. This class defines these additional attributes: ``gui`` This attribute will be an instance of a :class:`ReventGUI` which is used to control the automation ``setup_timeout`` This is the time allowed for replaying a recording for the setup stage. ``run_timeout`` This is the time allowed for replaying a recording for the run stage. ``extract_results_timeout`` This is the time allowed for replaying a recording for the extract results stage. ``teardown_timeout`` This is the time allowed for replaying a recording for the teardown stage. .. _uiautoworkload-api: UiautoWorkload ^^^^^^^^^^^^^^ The :class:`UiautoWorkload` derives from :class:`UIWorkload`, which is an intermediate class which in turn inherits from :class:`Workload`, however this allows for providing generic automation using UiAutomator without associating a particular application with the workload. This class defines these additional attributes: ``gui`` This attribute will be an instance of a :class:`UiAutomatorGUI` which is used to control the automation, and is what is used to pass parameters to the java class, for example ``gui.uiauto_params``. .. _reventworkload-api: ReventWorkload ^^^^^^^^^^^^^^ The :class:`ReventWorkload` derives from :class:`UIWorkload`, which is an intermediate class which in turn inherits from :class:`Workload`, however this allows for providing generic automation using :ref:`Revent ` without associating a particular application with the workload. This class defines these additional attributes: ``gui`` This attribute will be an instance of a :class:`ReventGUI` which is used to control the automation ``setup_timeout`` This is the time allowed for replaying a recording for the setup stage. ``run_timeout`` This is the time allowed for replaying a recording for the run stage. ``extract_results_timeout`` This is the time allowed for replaying a recording for the extract results stage. ``teardown_timeout`` This is the time allowed for replaying a recording for the teardown stage.
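Pulling the pieces above together, a minimal sketch of an :class:`ApkUiautoWorkload` that passes a parameter through to its UiAutomator automation via ``gui.uiauto_params`` (all names here are hypothetical):

.. code-block:: python

    from wa import ApkUiautoWorkload, Parameter

    class MyUiBench(ApkUiautoWorkload):
        """A hypothetical UiAutomator-driven workload, for illustration only."""

        name = 'myuibench'
        package_names = ['com.example.myuibench']  # hypothetical package

        parameters = [
            Parameter('test_name', kind=str, default='default_test',
                      description='Name of the sub-test to run (hypothetical).'),
        ]

        def setup(self, context):
            # Values placed in uiauto_params are passed to the workload's
            # Java UiAutomation class, so they must be set before the
            # superclass setup launches the automation.
            self.gui.uiauto_params['test_name'] = self.test_name
            super(MyUiBench, self).setup(context)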
================================================ FILE: doc/source/api.rst ================================================ Workload Automation API ======================= .. toctree:: :maxdepth: 2 api/output api/workload ================================================ FILE: doc/source/changes.rst ================================================ ================================= What's New in Workload Automation ================================= *********** Version 3.3.1 *********** .. warning:: This is the last release supporting Python 3.5 and Python 3.6. Subsequent releases will support Python 3.7+. New Features: ============== Commands: --------- Instruments: ------------ - ``perf``: Add support for ``report-sample``. Workloads: ---------- - ``PCMark``: Add support for PCMark 3.0. - ``Antutu``: Add support for 9.1.6. - ``Geekbench``: Add support for Geekbench5. - ``gfxbench``: Support the non-corporate version. Fixes/Improvements ================== Framework: ---------- - Fix installation on systems without git installed. - Avoid querying online cpus if hotplug is disabled. Dockerfile: ----------- - Update base image to Ubuntu 20.04. Instruments: ------------ - ``perf``: Fix parsing csv when using interval-only-values. - ``perf``: Improve error reporting of an invalid agenda. Output Processors: ------------------ - ``postgres``: Fixed SQL command when creating a new event. Workloads: ---------- - ``speedometer``: Fix adb reverse when rebooting a device. - ``googleplaybook``: Support newer apk version. - ``googlephotos``: Support newer apk version. - ``gmail``: Support newer apk version. Other: ------ - Upgrade Android Gradle to 7.2 and Gradle plugin to 4.2. *********** Version 3.3 *********** New Features: ============== Commands: --------- - Add ``report`` command to provide a summary of a run. Instruments: ------------ - Add ``proc_stat`` instrument to monitor CPU load using data from ``/proc/stat``. Framework: ---------- - Add support for simulating atomic writes to prevent race conditions when running concurrent instances of WA. - Add support for file transfer for SSH connections via SFTP, falling back to using the SCP implementation. - Support detection of logcat buffer overflow and present a warning if this occurs. - Allow skipping all remaining jobs if a job had exhausted all of its retries. - Add polling mechanism for file transfers rather than relying on timeouts. - Add `run_completed` reboot policy to enable rebooting a target after a run has been completed. Android Devices: ---------------- - Enable configuration of whether to keep the screen on while the device is plugged in. Output Processors: ------------------ - Enable the use of cascading deletion in Postgres databases to clean up after deletion of a run entry. Fixes/Improvements ================== Framework: ---------- - Improvements to the ``process`` command to correctly handle skipped and in-process jobs. - Add support for deprecated parameters allowing for a warning to be raised when providing a parameter that will no longer have an effect. - Switch implementation of SSH connections to use Paramiko for greater stability. - By default use sftp for file transfers with SSH connections, allow falling back to scp by setting ``use_scp``. - Fix callbacks not being disconnected correctly when requested. - ``ApkInfo`` objects are now cached to reduce re-parsing of APK files. - Speed up discovery of wa output directories. - Fix merge handling of parameters from multiple files. Dockerfile: ----------- - Install additional instruments for use in the docker environment. - Fix environment variables not being defined in non-interactive environments.
Instruments: ------------ - ``trace_cmd``: additional fixes for Python 3 support. Output Processors: ------------------ - ``postgres``: Fixed SQL command when creating a new event. Workloads: ---------- - ``aitutu``: Improve reliability of results extraction. - ``androbench``: Enable dismissing of additional popups on some devices. - ``antutu``: Now supports major version 8 in addition to version 7.X. - ``exoplayer``: Add support for Android 10. - ``googlephotos``: Support newer apk version. - ``gfxbench``: Allow user configuration for which tests should be run. - ``gfxbench``: Improved score detection for a wider range of devices. - ``gfxbench``: Moved results extraction out of run stage. - ``jankbench``: Support newer versions of Pandas for processing. - ``pcmark``: Add support for handling additional popups and installation flows. - ``pcmark``: No longer clear and re-download test data before each execution. - ``speedometer``: Enable the workload to run offline and drop the requirement for UiAutomator. To support this, root access is now required to run the workload. - ``youtube``: Update to support later versions of the apk. Other: ------ - ``cpustates``: Improved name handling for unknown idle states. *********** Version 3.2 *********** .. warning:: This release only supports Python 3.5+. Python 2 support has now been dropped. Fixes/Improvements ================== Framework: ---------- - ``TargetInfo`` now tracks installed modules and will ensure the cache is also updated on module change. - Migrated the build scripts for uiauto-based workloads to Python 3. - Uiauto applications now target SDK version 28 to prevent PlayProtect blocking the installation of the automation apks on some devices. - The workload metadata now includes the apk package name if applicable. Instruments: ------------ - ``energy_instruments`` will now have their ``teardown`` method called correctly. - ``energy_instruments``: Added a ``keep_raw`` parameter to control whether raw files generated during execution should be deleted upon teardown. - Update relevant instruments to make use of the new devlib collector interface, for more information please see the `devlib documentation `_. Output Processors: ------------------ - ``postgres``: If initialisation fails then the output processor will no longer attempt to reconnect at a later point during the run. - ``postgres``: Will now ensure that the connection to the database is re-established if it is dropped, e.g. due to a long-running workload. - ``postgres``: Change the type of the ``hostid`` field to ``Bigint`` to allow a larger range of ids. - ``postgres``: Bump schema version to 1.5. - ``perf``: Added support for the ``simpleperf`` profiling tool for Android devices. - ``perf``: Added support for the perf ``record`` command. - ``cpustates``: Improve handling of situations where cpufreq and/or cpuinfo data is unavailable. Workloads: ---------- - ``adobereader``: Now supports apk version 19.7.1.10709. - ``antutu``: Supports dismissing of popup asking to create a shortcut on the homescreen. - ``gmail``: Now supports apk version 2019.05.26.252424914. - ``googlemaps``: Now supports apk version 10.19.1. - ``googlephotos``: Now supports apk version 4.28.0. - ``geekbench``: Added support for versions 4.3.4, 4.4.0 and 4.4.2. - ``geekbench-corporate``: Added support for versions 5.0.1 and 5.0.3. - ``pcmark``: Now locks device orientation to portrait to increase compatibility. - ``pcmark``: Supports dismissing new Android 10 permission warnings.
Other: ------ - Improve documentation to help debug module installation errors. ************* Version 3.1.4 ************* .. warning:: This is the last release that supports Python 2. Subsequent versions will support Python 3.5+ only. New Features: ============== Framework: ---------- - ``ApkWorkload``: Allow specifying a maximum and minimum version of an APK instead of requiring a specific version. - ``TestPackageHandler``: Added to support running android applications that are invoked via ``am instrument``. - Directories can now be added as ``Artifacts``. Workloads: ---------- - ``aitutu``: Executes the Aitutu Image Speed/Accuracy and Object Speed/Accuracy tests. - ``uibench``: Run a configurable activity of the UIBench workload suite. - ``uibenchjanktests``: Run an automated and instrumented version of the UIBench JankTests. - ``motionmark``: Run a browser graphical benchmark. Other: ------ - Added ``requirements.txt`` as a reference for known working package versions. Fixes/Improvements ================== Framework: ---------- - ``JobOutput``: Added an ``augmentation`` attribute to allow listing of enabled augmentations for individual jobs. - Better error handling for misconfiguration in job selection. - All ``Workload`` classes now have an ``uninstall`` parameter to control whether any binaries installed to the target should be uninstalled again once the run has completed. - The ``cleanup_assets`` parameter is now more consistently utilized across workloads. - ``ApkWorkload``: Added an ``activity`` attribute to allow for overriding the automatically detected activity from the APK. - ``ApkWorkload``: Added support for providing an implicit activity path. - Fixed retrieving job level artifacts from a database backend. Output Processors: ------------------ - ``SysfsExtractor``: Ensure that the extracted directories are added as ``Artifacts``. - ``InterruptStatsInstrument``: Ensure that the output files are added as ``Artifacts``. - ``Postgres``: Fix missing ``system_id`` field from ``TargetInfo``. - ``Postgres``: Support uploading directory ``Artifacts``. - ``Postgres``: Bump the schema version to v1.3. Workloads: ---------- - ``geekbench``: Improved apk version handling. - ``geekbench``: Now supports apk version 4.3.2. Other: ------ - ``Dockerfile``: Now installs all optional extras for use with WA. - Fixed support for YAML anchors. - Fixed building of documentation with Python 3. - Changed shorthand of installing all of WA extras to `all` as per the documentation. - Upgraded the Dockerfile to use Ubuntu 18.10 and Python 3. - Restricted maximum versions of ``numpy`` and ``pandas`` for Python 2.7. ************* Version 3.1.3 ************* Fixes/Improvements ================== Other: ------ - Security update for PyYAML to attempt prevention of arbitrary code execution during parsing. ************* Version 3.1.2 ************* Fixes/Improvements ================== Framework: ---------- - Implement an explicit check for Devlib versions to ensure that versions are kept in sync with each other. - Added a ``View`` parameter to ApkWorkloads for use with certain instruments, for example ``fps``. - Added ``"supported_versions"`` attribute to workloads to allow specifying a list of supported versions for a particular workload. - Change default behaviour to run any available version of a workload if a specific version is not specified. Output Processors: ------------------ - ``Postgres``: Fix handling of ``screen_resoultion`` during processing.
Other ----- - Added additional information to documentation - Added fix for Devlib's ``KernelConfig`` refactor - Added a ``"label"`` property to ``Metrics`` ************* Version 3.1.1 ************* Fixes/Improvements ================== Other ----- - Improve formatting when displaying metrics - Update revent binaries to include latest fixes - Update DockerImage to use new released version of WA and Devlib - Fix broken package on PyPi ************* Version 3.1.0 ************* New Features: ============== Commands --------- - ``create database``: Added :ref:`create subcommand ` command in order to initialize a PostgreSQL database to allow for storing WA output with the Postgres Output Processor. Output Processors: ------------------ - ``Postgres``: Added output processor which can be used to populate a Postgres database with the output generated from a WA run. - ``logcat-regex``: Add new output processor to extract arbitrary "key" "value" pairs from logcat. Configuration: -------------- - :ref:`Configuration Includes `: Add support for including other YAML files inside agendas and config files using ``"include#:"`` entries. - :ref:`Section groups `: This allows for a ``group`` entry to be specified for each section and will automatically cross product the relevant sections with sections from other groups adding the relevant classifiers. Framework: ---------- - Added support for using the :ref:`OutputAPI ` with a Postgres Database backend. Used to retrieve and :ref:`process ` run data uploaded by the ``Postgres`` output processor. Workloads: ---------- - ``gfxbench-corporate``: Execute a set of on- and offscreen graphical benchmarks from GFXBench including Car Chase and Manhattan. - ``glbench``: Measures the graphics performance of Android devices by testing the underlying OpenGL (ES) implementation. Fixes/Improvements ================== Framework: ---------- - Remove quotes from ``sudo_cmd`` parameter default value due to changes in devlib. - Various Python 3 related fixes. - Ensure plugin names are converted to identifiers internally to act more consistently when dealing with names containing ``-``'s etc. - Now correctly updates RunInfo with project and run name information. - Add versioning support for POD structures with the ability to automatically update data structures / formats to new versions. Commands: --------- - Fix revent target initialization. - Fix revent argument validation. Workloads: ---------- - ``Speedometer``: Close open tabs upon workload completion. - ``jankbench``: Ensure that the logcat monitor thread is terminated correctly to prevent left over adb processes. - UiAutomator workloads are now able to dismiss the Android warning that a workload has not been designed for the latest version of Android. Other: ------ - Report additional metadata about target, including: system_id, page_size_kb. - Uses the cache directory to reduce target calls, e.g. will now use a cached version of TargetInfo if a local copy is found. - Update recommended :ref:`installation ` commands when installing from GitHub due to pip not following dependency links correctly. - Fix incorrect parameter names in runtime parameter documentation. -------------------------------------------------- ************* Version 3.0.0 ************* WA3 is a more or less from-scratch re-write of WA2. We have attempted to maintain configuration-level compatibility wherever possible (so WA2 agendas *should* mostly work with WA3), however some breaks are likely and minor tweaks may be needed.
In terms of the API, WA3 is completely different, and WA2 extensions **will
not work** with WA3 -- they would need to be ported into WA3 plugins. For more
information on migrating from WA2 to WA3 please see the
:ref:`migration-guide`.

Not all of WA2's extensions have been ported for the initial 3.0.0 release. We
have ported the ones we believe to be most widely used and useful. The porting
work will continue, and more of WA2's extensions will be ported in future
releases. However, we do not intend to port absolutely everything, as some
things we believe are no longer useful.

.. note:: If there is a particular WA2 extension you would like to see in WA3
          that is not yet there, please let us know via the GitHub issues.
          (And, of course, we always welcome pull requests, if you have the
          time to do the port yourself :-) ).

New Features
============

- Python 3 support. WA now runs on both Python 2 and Python 3.

  .. warning:: Python 2 support should now be considered deprecated. Python 2
               will still be fully supported up to the next major release
               (v3.1). After that, Python 2 will be supported for existing
               functionality; however, there will be no guarantee that newly
               added functionality will be compatible with Python 2. Support
               for Python 2 will be dropped completely after release v3.2.

- There is a new Output API which can be used to aid in post-processing a
  run's output. For more information please see :ref:`output_processing_api`.
- All "augmentations" can now be enabled on a per-workload basis (in WA2 this
  was available for instruments, but not result processors).
- More portable runtime parameter specification. Runtime parameters now
  support generic aliases, so instead of specifying ``a73_frequency: 1805000``
  in your agenda, and then having to modify this for another target, it is now
  possible to specify ``big_frequency: max``.
- The ``-c`` option can now be used multiple times to specify several config
  files for a single run, allowing for more fine-grained configuration
  management.
- It is now possible to disable all previously configured augmentations from
  an agenda using ``~~``.
- Offline output processing with the ``wa process`` command. It is now
  possible to run processors on previously collected WA results, without the
  need for a target connection.
- A lot more metadata is collected as part of the run, including much more
  detailed information about the target, and MD5 hashes of all resources used
  during the run.
- Better ``show`` command. The ``wa show`` command now utilizes ``pandoc`` and
  ``man`` to produce an easier-to-browse documentation format, and has been
  enhanced to include documentation on general settings, runtime parameters,
  and plugin aliases.
- Better logging. The default ``stdout`` output is now more informative. The
  verbose output is much more detailed. Nested indentation is used for
  different phases of execution to make log output easier to parse visually.
- Full ``ChromeOS`` target support, including support for Android container
  apps.
- Implemented on top of devlib_. WA3 plugins can make use of devlib's enhanced
  target API (much richer and more robust than WA2's Device API).
- All-new documentation. The docs have been revamped to be more useful and
  complete.

.. _devlib: https://github.com/ARM-software/devlib

Changes
=======

- Configuration files ``config.py`` are now specified in YAML format in
  ``config.yaml``. WA3 supports automatic conversion of the default config
  file, which will be performed upon the first invocation of WA3.
- The "config" and "global" sections in an agenda are now interchangeable, so
  they can all be specified in a "config" section.
- "Results Processors" are now known as "Output Processors" and can now be run
  offline.
- "Instrumentation" is now known as "Instruments" for more consistent naming.
- Both "Output Processor" and "Instrument" configuration have been merged into
  "Augmentations" (support for the old naming schemes has been retained for
  backwards compatibility).


================================================
FILE: doc/source/conf.py
================================================

# -*- coding: utf-8 -*-
# Copyright 2023 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WA3 documentation build configuration file.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
import shlex

this_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(this_dir, '..'))
sys.path.insert(0, os.path.join(this_dir, '../..'))

import wa
from build_plugin_docs import (generate_plugin_documentation,
                               generate_run_config_documentation,
                               generate_meta_config_documentation,
                               generate_target_documentation)
from build_instrument_method_map import generate_instrument_method_map

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['static/templates']

# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'wa'
copyright = u'2023, ARM Limited'
author = u'ARM Limited'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = wa.framework.version.get_wa_version()
# The full version, including alpha/beta/rc tags.
release = wa.framework.version.get_wa_version()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['../build', 'developer_information', 'user_information', 'run_config'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'logo_only': True } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = 'WA-logo-white.svg' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. 
#html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'wadoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'wa.tex', u'wa Documentation', u'Arm Limited', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'wa', u'wa Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'wa', u'wa Documentation', author, 'wa', 'A framework for automating workload execution on mobile devices.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
#texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False def setup(app): module_dir = os.path.join('..', '..', 'wa') excluded_extensions = [os.path.join(module_dir, 'framework'), os.path.join(module_dir, 'tests')] os.chdir(os.path.dirname(__file__)) generate_plugin_documentation(module_dir, 'plugins', excluded_extensions) generate_target_documentation('plugins') generate_run_config_documentation('run_config') generate_meta_config_documentation('run_config') generate_instrument_method_map(os.path.join('developer_information', 'developer_guide', 'instrument_method_map.rst')) app.add_object_type('confval', 'confval', objname='configuration value', indextemplate='pair: %s; configuration value') ================================================ FILE: doc/source/developer_information/developer_guide/writing_plugins.rst ================================================ .. _writing-plugins: Writing Plugins ================ Workload Automation offers several plugin points (or plugin types). The most interesting of these are :workloads: These are the tasks that get executed and measured on the device. These can be benchmarks, high-level use cases, or pretty much anything else. :targets: These are interfaces to the physical devices (development boards or end-user devices, such as smartphones) that use cases run on. Typically each model of a physical device would require its own interface class (though some functionality may be reused by subclassing from an existing base). :instruments: Instruments allow collecting additional data from workload execution (e.g. system traces). Instruments are not specific to a particular workload. Instruments can hook into any stage of workload execution. :output processors: These are used to format the results of workload execution once they have been collected. Depending on the callback used, these will run either after each iteration and/or at the end of the run, after all of the results have been collected. You can create a plugin by subclassing the appropriate base class, defining appropriate methods and attributes, and putting the .py file containing the class into the "plugins" subdirectory under ``~/.workload_automation`` (or equivalent) where it will be automatically picked up by WA. Plugin Basics -------------- This sub-section covers things common to implementing plugins of all types. It is recommended you familiarize yourself with the information here before proceeding onto guidance for specific plugin types. .. _resource-resolution: Dynamic Resource Resolution ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The idea is to decouple resource identification from resource discovery. Workloads/instruments/devices/etc state *what* resources they need, and not *where* to look for them -- this instead is left to the resource resolver that is part of the execution context. The actual discovery of resources is performed by resource getters that are registered with the resolver. A resource type is defined by a subclass of :class:`wa.framework.resource.Resource`. An instance of this class describes a resource that is to be obtained. At minimum, a ``Resource`` instance has an owner (which is typically the object that is looking for the resource), but specific resource types may define other parameters that describe an instance of that resource (such as file names, URLs, etc). An object looking for a resource invokes a resource resolver with an instance of ``Resource`` describing the resource it is after. 
The resolver goes through the getters registered for that resource type in
priority order, attempting to obtain the resource; once the resource is
obtained, it is returned to the calling object. If none of the registered
getters could find the resource, ``NotFoundError`` is raised (or ``None`` is
returned instead, if invoked with ``strict=False``).

The most common kind of object looking for resources is a ``Workload``, and
the ``Workload`` class defines the
:py:meth:`wa.framework.workload.Workload.init_resources` method, which may be
overridden by subclasses to perform resource resolution. For example, a
workload looking for an executable file would do so like this::

    from wa import Workload
    from wa import Executable

    class MyBenchmark(Workload):

        # ...

        def init_resources(self, resolver):
            resource = Executable(self, self.target.abi, 'my_benchmark')
            host_exe = resolver.get(resource)

        # ...

Currently available resource types are defined in
:py:mod:`wa.framework.resource`.

.. _deploying-executables:

Deploying executables to a target
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Some targets may have certain restrictions on where executable binaries may be
placed and how they should be invoked. To ensure your plugin works with as
wide a range of targets as possible, you should use WA APIs for deploying and
invoking executables on a target, as outlined below.

As with other resources, host-side paths to the executable binary to be
deployed should be obtained via the :ref:`resource resolver
<resource-resolution>`. A special resource type, ``Executable``, is used to
identify a binary to be deployed. This is similar to the regular ``File``
resource; however, it takes an additional parameter that specifies the ABI for
which the executable was compiled.

In order for the binary to be obtained in this way, it must be stored in one
of the locations scanned by the resource resolver in a directory structure
``<root>/bin/<abi>/<binary>`` (where ``<root>`` is the base resource location
to be searched, e.g. ``~/.workload_automation/dependencies/``, and ``<abi>``
is the ABI for which the executable has been compiled, as returned by
``self.target.abi``).

Once the path to the host-side binary has been obtained, it may be deployed
using one of two methods from a `Target `_ instance -- ``install`` or
``install_if_needed``. The latter will check whether a version of that binary
has been previously deployed by WA and, if so, will not try to re-install.

.. code:: python

    from wa import Executable

    host_binary = context.get(Executable(self, self.target.abi, 'some_binary'))
    target_binary = self.target.install_if_needed(host_binary)

.. note:: Please also note that the check is done based solely on the binary
          name. For more information please see the devlib
          `documentation `_.

Both of the above methods will return the path to the installed binary on the
target. The executable should be invoked *only* via that path; do **not**
assume that it will be in ``PATH`` on the target (or that the executable with
the same name in ``PATH`` is the version deployed by WA).

For more information on how to implement this, please see the
:ref:`how to guide `.

Deploying assets
-----------------

WA provides a generic mechanism for deploying assets during workload
initialization. WA will automatically try to retrieve and deploy each asset
listed in a workload's ``deployable_assets`` attribute (a list) to the
target's working directory. If the ``cleanup_assets`` parameter is set, any
deployed assets will be removed again at the end of the run.
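For example, a workload that ships with a couple of media files could declare
them in this way; a minimal sketch (the asset file names here are
hypothetical):

.. code:: python

    from wa import Workload


    class MyMediaWorkload(Workload):

        name = 'my_media_workload'

        # Each asset listed here will be retrieved on the host and deployed
        # to the target's working directory during workload initialization.
        deployable_assets = ['test_video.mp4', 'test_config.json']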
If the workload requires a custom deployment mechanism, the ``deploy_assets``
method can be overridden for that particular workload; in that case, any
additional assets deployed should have their on-target paths added to the
workload's ``deployed_assets`` attribute, or the corresponding
``remove_assets`` method should also be implemented.

.. _instrument-reference:

Adding an Instrument
---------------------

Instruments can be used to collect additional measurements during workload
execution (e.g. collect power readings). An instrument can hook into almost
any stage of workload execution. Any new instrument should be a subclass of
``Instrument``, and it must have a name. When a new instrument is added to
Workload Automation, its methods will be discovered automatically and hooked
up to the supported signals. Once a signal is broadcast, the corresponding
registered method is invoked.

Each method in an ``Instrument`` must take two arguments, which are ``self``
and ``context``. Supported methods and their corresponding signals can be
found in the :ref:`Signals Documentation `. To keep implementations simple and
consistent, the basic steps to add a new instrument are similar to the steps
to add a new workload, and an example can be found in the :ref:`How To `
section.

.. _instrument-api:

To implement your own instrument, the relevant methods of the interface shown
below should be implemented:

:name: The name of the instrument; this must be unique within WA.

:description: A description of what the instrument can be used for.

:parameters: A list of additional :class:`Parameters` the instrument can take.

:initialize(context): This method will only be called once during the workload
    run, therefore operations that only need to be performed initially should
    be performed here, for example pushing files to the target device and
    installing them.

:setup(context): This method is invoked after the workload is set up. All the
    necessary setup should go inside this method. Setup includes operations
    like clearing logs, additional configuration, etc.

:start(context): Invoked just before the workload starts execution. This is
    where instrument measurements should start being registered/taken.

:stop(context): Invoked just after the workload execution stops; this is where
    measurements should stop being taken/registered.

:update_output(context): Invoked after the workload has updated its result;
    this is where the measurements taken should be added to the result so they
    can be processed by WA.

:teardown(context): Invoked after the workload has been torn down. It is a
    good place to clean up any logs generated by the instrument.

:finalize(context): This method is the complement to the initialize method and
    will also only be called once, so it should be used to delete/uninstall
    files pushed to the device.

This is similar to a ``Workload``, except all methods are optional. In
addition to the workload-like methods, instruments can define a number of
other methods that will get invoked at various points during run execution.
The most useful of these is perhaps ``initialize``, which gets invoked after
the device has been initialised for the first time, and can be used to perform
one-time setup (e.g. copying files to the device -- there is no point in doing
that for each iteration). The full list of available methods can be found in
the :ref:`Signals Documentation `.

.. _prioritization:

Prioritization
~~~~~~~~~~~~~~

Callbacks (e.g.
``setup()`` methods) for all instruments get executed at the same point during
workload execution, one after another. The order in which the callbacks get
invoked should be considered arbitrary and should not be relied on (e.g. you
cannot expect that just because instrument A is listed before instrument B in
the config, instrument A's callbacks will run first).

In some cases (e.g. in ``start()`` and ``stop()`` methods), it is important to
ensure that a particular instrument's callbacks run as closely as possible to
the workload's invocations in order to maintain the accuracy of readings; or,
conversely, that a callback is executed after the others, because it takes a
long time and may throw off the accuracy of other instruments. You can do this
by using decorators on the appropriate methods. The available decorators are:
``very_slow``, ``slow``, ``normal``, ``fast``, ``very_fast``, with
``very_fast`` running closest to the workload invocation and ``very_slow``
running furthest away. For example::

    from wa import very_fast
    # ..

    class PreciseInstrument(Instrument):

        # ...

        @very_fast
        def start(self, context):
            pass

        @very_fast
        def stop(self, context):
            pass

        # ...

``PreciseInstrument`` will be started after all other instruments (i.e. *just*
before the workload runs), and it will be stopped before all other instruments
(i.e. *just* after the workload runs). If more than one active instrument has
specified fast (or slow) callbacks, then their execution order with respect to
each other is not guaranteed. In general, having a lot of instruments enabled
is going to negatively affect the readings. The best way to ensure accuracy of
measurements is to minimize the number of active instruments (perhaps doing
several identical runs with different instruments enabled).

Example
^^^^^^^

Below is a simple instrument that measures the execution time of a workload::

    import time

    from wa import Instrument, very_fast


    class ExecutionTimeInstrument(Instrument):
        """
        Measure how long it took to execute the run() method of a Workload.

        """

        name = 'execution_time'

        def initialize(self, context):
            self.start_time = None
            self.end_time = None

        @very_fast
        def start(self, context):
            self.start_time = time.time()

        @very_fast
        def stop(self, context):
            self.end_time = time.time()

        def update_output(self, context):
            execution_time = self.end_time - self.start_time
            context.add_metric('execution_time', execution_time, 'seconds')

.. include:: developer_information/developer_guide/instrument_method_map.rst

.. _adding-an-output-processor:

Adding an Output Processor
--------------------------

An output processor is responsible for processing the results. This may
involve formatting and writing them to a file, uploading them to a database,
generating plots, etc. WA comes with a few output processors that output
results in a few common formats (such as csv or JSON).

You can add your own output processors by creating a Python file in
``~/.workload_automation/plugins`` with a class that derives from
:class:`wa.OutputProcessor `; it should implement the relevant methods shown
below. For more information please see the
:ref:`Adding an Output Processor ` section.

:name: The name of the output processor; this must be unique within WA.

:description: A description of what the output processor can be used for.

:parameters: A list of additional :class:`Parameters` the output processor can
    take.

:initialize(context): This method will only be called once during the workload
    run, therefore operations that only need to be performed initially should
    be performed here.
:process_job_output(output, target_info, run_output): This method should be
    used to perform the processing of the output from an individual job. This
    is where any additional artifacts should be generated, if applicable.

:export_job_output(output, target_info, run_output): This method should be
    used to perform the export of the existing data collected/generated for an
    individual job, e.g. uploading them to a database.

:process_run_output(output, target_info): This method should be used to
    perform the processing of the output from the run as a whole. This is
    where any additional artifacts should be generated, if applicable.

:export_run_output(output, target_info): This method should be used to perform
    the export of the existing data collected/generated for the run as a
    whole, e.g. uploading them to a database.

:finalize(context): This method is the complement to the initialize method and
    will also only be called once.

The method names should be fairly self-explanatory. The difference between
"process" and "export" methods is that export methods are invoked only after
the process methods of all output processors have run. Process methods may
generate additional artifacts (metrics, files, etc.), while export methods
should not -- they should only handle existing results (upload them to a
database, archive them on a filer, etc.).

The output object passed to job methods is an instance of
:class:`wa.framework.output.JobOutput`; the output object passed to run
methods is an instance of :class:`wa.RunOutput `.

Adding a Resource Getter
------------------------

A resource getter is a plugin that is designed to retrieve a resource
(binaries, APK files or additional workload assets). Resource getters are
invoked in priority order until one returns the desired resource.

If you want WA to look for resources somewhere it doesn't by default (e.g. you
have a repository of APK files), you can implement a getter for the resource
and register it with a higher priority than the standard WA getters, so that
it gets invoked first.

Instances of a resource getter should implement the following interface::

    class ResourceGetter(Plugin):

        name = None

        def register(self, resolver):
            raise NotImplementedError()

The getter should define a name for itself (as with all plugins); in addition,
it should implement the ``register`` method. This involves registering a
method with the resolver (typically ``get``) that should be called when trying
to retrieve a resource, along with its priority (see `Getter Prioritization`_
below). That method should return an instance of the resource that has been
discovered (what "instance" means depends on the resource type, e.g. it could
be a file path), or ``None`` if this getter was unable to discover that
resource.

Getter Prioritization
~~~~~~~~~~~~~~~~~~~~~

A priority is an integer with higher numeric values indicating a higher
priority. The following standard priority aliases are defined for getters:

:preferred: Take this resource in favour of the environment resource.

:local: Found somewhere under ``~/.workload_automation/`` or equivalent, or
    from environment variables, external configuration files, etc. These will
    override resources supplied with the package.

:lan: Resource will be retrieved from a locally mounted remote location (such
    as a samba share).

:remote: Resource will be downloaded from a remote location (such as an HTTP
    server).

:package: Resource provided with the package.
These priorities are defined as class members of
:class:`wa.framework.resource.SourcePriority`, e.g.
``SourcePriority.preferred``.

Most getters in WA will be registered with either ``local`` or ``package``
priorities. So if you want your getter to override the default, it should
typically be registered as ``preferred``.

You don't have to stick to the standard priority levels (though you should,
unless there is a good reason not to). Any integer is a valid priority. The
standard priorities range from 0 to 40 in increments of 10.

Example
~~~~~~~

The following is an implementation of a getter that searches for files in the
user's dependencies directory, typically
``~/.workload_automation/dependencies/``. It uses the ``get_from_location``
method to filter the available files in the provided directory
appropriately::

    import os

    from wa import settings
    from wa.framework.resource import ResourceGetter, SourcePriority
    from wa.framework.getters import get_from_location
    from wa.utils.misc import ensure_directory_exists as _d

    class UserDirectory(ResourceGetter):

        name = 'user'

        def register(self, resolver):
            resolver.register(self.get, SourcePriority.local)

        def get(self, resource):
            basepath = settings.dependencies_directory
            directory = _d(os.path.join(basepath, resource.owner.name))
            return get_from_location(directory, resource)

.. _adding_a_target:

Adding a Target
---------------

In WA3, a 'target' consists of a platform and a devlib target. The
implementations of the targets are located in ``devlib``. WA3 will instantiate
a devlib target, passing relevant parameters parsed from the configuration.
For more information about devlib targets please see
`the documentation `_.

The currently available platforms are:

:generic: The 'standard' platform implementation of the target; this should
    work for the majority of use cases.

:juno: A platform implementation specifically for the Juno.

:tc2: A platform implementation specifically for the TC2.

:gem5: A platform implementation to interact with a gem5 simulation.

The currently available targets from devlib are:

:linux: A device running a Linux-based OS.

:android: A device running Android OS.

:local: Used to run locally on a Linux-based host.

:chromeos: A device running ChromeOS, supporting an Android container if
    available.

For an example of adding your own customized version of an existing devlib
target, please see the how-to section
:ref:`Adding a Custom Target `.

Other Plugin Types
---------------------

In addition to the plugin types covered above, there are a few other, more
specialized ones. They will not be covered in as much detail. Most of them
expose relatively simple interfaces with only a couple of methods, and it is
expected that, if the need arises to extend them, the API-level documentation
that accompanies them, in addition to what has been outlined here, should
provide enough guidance.

:commands: This allows extending WA with additional sub-commands (to
    supplement the existing ones outlined in the :ref:`invocation` section).

:modules: Modules are "plugins for plugins". They can be loaded by other
    plugins to expand their functionality (for example, a flashing module may
    be loaded by a device in order to support flashing).

Packaging Your Plugins
----------------------

If you have written a bunch of plugins and you want to make it easy to deploy
them to new systems and/or to update them on existing systems, you can wrap
them in a Python package. You can use the ``wa create package`` command to
generate the appropriate boilerplate.
This will create a ``setup.py`` and a directory for your package that you can
place your plugins into. For example, if you have a workload inside
``my_workload.py`` and an output processor in ``my_output_processor.py``, and
you want to package them as the ``my_wa_exts`` package, first run the create
command ::

    wa create package my_wa_exts

This will create a ``my_wa_exts`` directory which contains a
``my_wa_exts/setup.py`` and a subdirectory ``my_wa_exts/my_wa_exts``, which is
the package directory for your plugins (you can rename the top-level
``my_wa_exts`` directory to anything you like -- it's just a "container" for
the setup.py and the package directory).

Once you have that, you can copy your plugins into the package directory,
creating ``my_wa_exts/my_wa_exts/my_workload.py`` and
``my_wa_exts/my_wa_exts/my_output_processor.py``. If you have a lot of
plugins, you might want to organize them into subpackages, but only the
top-level package directory is created by default, and it is OK to have
everything in there.

.. note:: When discovering plugins through this mechanism, WA traverses the
          Python module/submodule tree, not the directory structure;
          therefore, if you are going to create subdirectories under the top
          level directory created for you, it is important that you make sure
          they are valid Python packages, i.e. each subdirectory must contain
          a ``__init__.py`` (even if blank) in order for the code in that
          directory and its subdirectories to be discoverable.

At this stage, you may want to edit the ``params`` structure near the bottom
of the ``setup.py`` to add correct author, license and contact information
(see the "Writing the Setup Script" section in the standard Python
documentation for details). You may also want to add a README and/or a COPYING
file at the same level as the setup.py. Once you have the contents of your
package sorted, you can generate the package by running ::

    cd my_wa_exts
    python setup.py sdist

This will generate a ``my_wa_exts/dist/my_wa_exts-0.0.1.tar.gz`` package which
can then be deployed on the target system with standard Python package
management tools, e.g. ::

    sudo pip install my_wa_exts-0.0.1.tar.gz

As part of the installation process, the setup.py in the package will write
the package's name into ``~/.workload_automation/packages``. This will tell WA
that the package contains plugins, and it will load them next time it runs.

.. note:: There are no uninstall hooks in ``setuptools``, so if you ever
          uninstall your WA plugins package, you will have to manually remove
          it from ``~/.workload_automation/packages``; otherwise WA will
          complain about a missing package next time you try to run it.


================================================
FILE: doc/source/developer_information/developer_guide.rst
================================================

.. _developer_guide:

***************
Developer Guide
***************

.. contents::
   :depth: 3
   :local:

.. include:: developer_information/developer_guide/writing_plugins.rst


================================================
FILE: doc/source/developer_information/developer_reference/contributing.rst
================================================

Contributing
============

Code
----

We welcome code contributions via GitHub pull requests. To help with
maintainability of the code base, we ask that the code uses a coding style
consistent with the rest of the WA code. Briefly, it is

- `PEP8 `_ with line length and block comment rules relaxed (the wrapper for
  the PEP8 checker inside ``dev_scripts`` will run it with the appropriate
  configuration).
- Four-space indentation (*no tabs!*).
- Title-case for class names, underscore-delimited lower case for functions,
  methods, and variables.
- Use descriptive variable names. Delimit words with ``'_'`` for readability.
  Avoid shortening words, skipping vowels, etc. (common abbreviations such as
  "stats" for "statistics", "config" for "configuration", etc. are OK). Do
  *not* use Hungarian notation (so prefer ``birth_date`` over ``dtBirth``).

New extensions should also follow the implementation guidelines specified in
the :ref:`writing-plugins` section of the documentation.

We ask that the following checks are performed on the modified code prior to
submitting a pull request:

.. note:: You will need the pylint and pep8 static checkers installed::

              pip install pep8
              pip install pylint

          It is recommended that you install them via pip rather than through
          your distribution's package manager, because the latter is likely to
          contain out-of-date versions of these tools.

- ``./dev_scripts/pylint`` should be run without arguments and should produce
  no output (any output should be addressed by making appropriate changes in
  the code or adding a pylint ignore directive, if there is a good reason for
  keeping the code as is).
- ``./dev_scripts/pep8`` should be run without arguments and should produce no
  output (any output should be addressed by making appropriate changes in the
  code).
- If the modifications touch the core framework (anything under
  ``wa/framework``), unit tests should be run using ``nosetests``, and they
  should all pass.
- If significant additions have been made to the framework, unit tests should
  be added to cover the new functionality.
- If modifications have been made to the UI Automation source of a workload,
  the corresponding APK should be rebuilt and submitted as part of the same
  pull request. This can be done via the ``build.sh`` script in the relevant
  ``uiauto`` subdirectory.
- If modifications have been made to the documentation (this includes
  description attributes for Parameters and Extensions), the documentation
  should be built to make sure no errors or warnings occur during the build
  process, and a visual inspection of new/updated sections in the resulting
  HTML should be performed to ensure everything renders as expected.

Once your contribution is ready, please follow the instructions in the
`GitHub documentation `_ to create a pull request.

--------------------------------------------------------------------------------

Documentation
-------------

Headings
~~~~~~~~

To allow for consistent headings to be used throughout the documentation, the
following character sequences should be used when creating headings ::

    =========
    Heading 1
    =========

    Only used for top level headings which should also have an entry in the
    navigational side bar.

    *********
    Heading 2
    *********

    Main page heading used for the page title; should not have a top level
    entry in the side bar.

    Heading 3
    ==========

    Regular section heading.

    Heading 4
    ---------

    Sub-heading.

    Heading 5
    ~~~~~~~~~

    Heading 6
    ^^^^^^^^^

    Heading 7
    """""""""

--------------------------------------------------------------------------------

Configuration Listings
~~~~~~~~~~~~~~~~~~~~~~

To keep a consistent style for presenting configuration options, the preferred
style is to use a `Field List`.
(See: http://docutils.sourceforge.net/docs/user/rst/quickref.html#field-lists)

Example::

    :parameter: My Description

Will render as:

:parameter: My Description

--------------------------------------------------------------------------------

API Style
~~~~~~~~~

When documenting an API, the currently preferred style is to provide a short
description of the class, followed by the attributes of the class in a
`Definition List`, followed by the methods using the `method` directive.
(See: http://docutils.sourceforge.net/docs/user/rst/quickref.html#definition-lists)

Example::

    API
    ===

    :class:`MyClass`
    ----------------

    :class:`MyClass` is an example class to demonstrate API documentation.

    ``attribute1``
        The first attribute of the example class.

    ``attribute2``
        Another attribute example.

    methods
    """""""

    .. method:: MyClass.retrieve_output(name)

       Retrieve the output for ``name``.

       :param name: The output that should be returned.
       :return: An :class:`Output` object for ``name``.
       :raises NotFoundError: If no output can be found.

Will render as:

:class:`MyClass` is an example class to demonstrate API documentation.

``attribute1``
    The first attribute of the example class.

``attribute2``
    Another attribute example.

methods
^^^^^^^

.. method:: MyClass.retrieve_output(name)

   Retrieve the output for ``name``.

   :param name: The output that should be returned.
   :return: An :class:`Output` object for ``name``.
   :raises NotFoundError: If no output can be found.


================================================
FILE: doc/source/developer_information/developer_reference/framework_overview.rst
================================================

Framework Overview
==================

Execution Model
---------------

At a high level, the execution model looks as follows:

.. image:: developer_information/developer_reference/WA_Execution.svg
   :scale: 100 %

After some initial setup, the framework initializes the device, loads and
initializes instruments and output processors, and begins executing jobs
defined by the workload specs in the agenda. Each job executes in basic
stages:

initialize
    Perform any once-per-run initialization of a workload instance, i.e.
    binary resource resolution.

setup
    Initial setup for the workload is performed. E.g. required assets are
    deployed to the device, required services or applications are launched,
    etc. Run-time configuration of the device for the workload is also
    performed at this time.

setup_rerun (apk based workloads only)
    For some apk based workloads, the application is required to be started
    twice. If the ``requires_rerun`` attribute of the workload is set to
    ``True``, then after the first setup method is called the application will
    be killed and then restarted. This method can then be used to perform any
    additional setup required.

run
    This is when the workload actually runs. This is defined as the part of
    the workload that is to be measured. Exactly what happens at this stage
    depends entirely on the workload.

extract results
    Extract any results that have been generated during the execution of the
    workload from the device back to the host. Any files pulled from the
    device should be added as artifacts to the run context.

update output
    Perform any required parsing and processing of any collected results and
    add any generated metrics to the run context.

teardown
    Final clean-up is performed; e.g. applications may be closed and files
    generated during execution deleted.
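The stages above map directly onto methods of a ``Workload`` subclass; below
is a minimal sketch of how a simple workload might fill them in (the workload
name, command, and metric used here are hypothetical):

.. code:: python

    import time

    from wa import Workload


    class ExampleWorkload(Workload):

        name = 'example'

        def setup(self, context):
            # Deploy assets and perform any required device configuration.
            super(ExampleWorkload, self).setup(context)

        def run(self, context):
            # The measured portion of the workload.
            start = time.time()
            self.target.execute('ls /')  # stand-in for the real workload command
            self.duration = time.time() - start

        def extract_results(self, context):
            # Pull any generated files from the device and register them as
            # artifacts here.
            pass

        def update_output(self, context):
            # Parse collected results and add metrics to the run context.
            context.add_metric('duration', self.duration, 'seconds')

        def teardown(self, context):
            # Final clean-up: close applications, delete generated files, etc.
            pass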
Signals are dispatched (see :ref:`below `) at each stage of workload
execution, which installed instruments can hook into in order to collect
measurements, alter workload execution, etc. Instrument implementations
usually mirror those of workloads, defining initialization, setup, teardown
and output processing stages for a particular instrument. Instead of a ``run``
method, instruments usually implement ``start`` and ``stop`` methods, which
are triggered just before and just after a workload run. However, the signal
dispatch mechanism gives a high degree of flexibility to instruments, allowing
them to hook into almost any stage of a WA run (apart from the very early
initialization).

Metrics and artifacts generated by workloads and instruments are accumulated
by the framework and are then passed to active output processors. This happens
after each individual workload execution and at the end of the run. An output
processor may choose to act at either or both of these points.

Control Flow
------------

This section goes into more detail, explaining the relationship between the
major components of the framework and how control passes between them during a
run. It will only go through the major transitions and interactions and will
not attempt to describe every single thing that happens.

.. note:: This is the control flow for the ``wa run`` command, which is the
          main functionality of WA. Other commands are much simpler and most
          of what is described below does not apply to them.

#. :class:`wa.framework.entrypoint` parses the command from the arguments,
   creates a :class:`wa.framework.configuration.execution.ConfigManager` and
   executes the run command (:class:`wa.commands.run.RunCommand`), passing it
   the ConfigManager.
#. The run command initializes the output directory, creates a
   :class:`wa.framework.configuration.parsers.AgendaParser`, and parses the
   agenda, populating the ConfigManager based on the command line arguments.
   Finally, it instantiates a :class:`wa.framework.execution.Executor` and
   passes it the completed ConfigManager.
#. The Executor uses the ConfigManager to create a
   :class:`wa.framework.configuration.core.RunConfiguration` and fully defines
   the configuration for the run (which will be serialised into the ``__meta``
   subdirectory under the output directory).
#. The Executor proceeds to instantiate a TargetManager, used to handle the
   device connection and configuration, and a
   :class:`wa.framework.execution.ExecutionContext`, which is used to track
   the current state of the run execution and also serves as a means of
   communication between the core framework and plugins. After this, any
   required instruments and output processors are initialized and installed.
#. Finally, the Executor instantiates a
   :class:`wa.framework.execution.Runner`, initializes its job queue with
   workload specs from the RunConfiguration, and kicks it off.
#. The Runner performs the run-time configuration of the device and goes
   through the workload specs (in the order defined by the ``execution_order``
   setting), running each spec according to the execution model described in
   the previous section and sending signals (see below) at appropriate points
   during execution.
#. At the end of the run, control is briefly passed back to the Executor,
   which outputs a summary for the run.

.. _signal_dispatch:

Signal Dispatch
---------------

WA uses the `louie `_ (formerly, pydispatcher) library for signal dispatch.
Callbacks can be registered for signals emitted during the run.
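For example, a callback can be registered with the dispatch mechanism
directly; a short sketch (this assumes the ``connect`` function and the
``RUN_STARTED`` signal exposed by :mod:`wa.framework.signal`):

.. code:: python

    from wa.framework import signal


    def on_run_start(*args, **kwargs):
        # Invoked whenever the RUN_STARTED signal is dispatched; the exact
        # arguments received depend on how the signal is sent.
        print('WA run is starting')

    signal.connect(on_run_start, signal.RUN_STARTED)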
WA uses a version of louie that has been modified to introduce
:ref:`priority ` to registered callbacks (so that callbacks that are known to
be slow can be registered with a lower priority and therefore do not interfere
with other callbacks).

This mechanism is abstracted for instruments. Methods of a
:class:`wa.framework.Instrument` subclass automatically get hooked up to
appropriate signals based on their names when the instrument is "installed"
for the run. Priority can then be specified by adding ``extremely_fast``,
``very_fast``, ``fast``, ``slow``, ``very_slow`` or ``extremely_slow``
:ref:`decorators ` to the method definitions.

The full list of method names and the signals they map to may be seen at the
:ref:`instrument method map `.

The signal dispatching mechanism may also be used directly, for example to
dynamically register callbacks at runtime, or to allow plugins other than
``Instruments`` to access stages of the run they are normally not aware of.

Signals can be either paired or non-paired. Non-paired signals are one-off
signals that are sent to indicate that a special event or a transition between
execution stages has occurred, for example ``TARGET_CONNECTED``. Paired
signals are used to signify the start and end of a particular event. If the
start signal has been sent, the end signal is guaranteed to also be sent,
whether the operation was a success or not; in the case of correct operation,
an additional success signal will also be sent. For example, in the event of a
successful reboot of the device, the following signals will be sent:
``BEFORE_REBOOT``, ``SUCCESSFUL_REBOOT`` and ``AFTER_REBOOT``.

An overview of which signals are sent at which point during execution can be
seen below. Most of the paired signals have been removed from the diagram for
clarity and are shown as being dispatched from a particular stage of
execution; however, in reality these signals will be sent just before and just
after these stages are executed. As mentioned above, for each of these signals
there will be at least 2 and up to 3 signals sent. If the "BEFORE_X" signal
(sent just before the stage is run) is sent, then the "AFTER_X" signal (sent
just after the stage is run) is guaranteed to also be sent, and under normal
operation a "SUCCESSFUL_X" signal is also sent just after the stage has been
completed. The diagram also lists the conditional signals that can be sent at
any time during execution if something unexpected happens, for example an
error occurs or the user aborts the run.

.. image:: developer_information/developer_reference/WA_Signal_Dispatch.svg
   :scale: 100 %

For more information see
:ref:`Instrumentation Signal-Method Mapping `.


================================================
FILE: doc/source/developer_information/developer_reference/plugins.rst
================================================

.. _plugins:

Plugins
=======

Workload Automation offers several plugin points (or plugin types). The most
interesting of these are:

:workloads: These are the tasks that get executed and measured on the device.
    These can be benchmarks, high-level use cases, or pretty much anything
    else.

:targets: These are interfaces to the physical devices (development boards or
    end-user devices, such as smartphones) that use cases run on. Typically,
    each model of a physical device would require its own interface class
    (though some functionality may be reused by subclassing from an existing
    base).

:instruments: Instruments allow collecting additional data from workload
    execution (e.g. system traces).
    Instruments are not specific to a particular workload. Instruments can
    hook into any stage of workload execution.

:output processors: These are used to format the results of workload execution
    once they have been collected. Depending on the callback used, these will
    run either after each iteration and/or at the end of the run, after all of
    the results have been collected.

You can create a plugin by subclassing the appropriate base class, defining
appropriate methods and attributes, and putting the .py file containing the
class into the "plugins" subdirectory under ``~/.workload_automation`` (or
equivalent), where it will be automatically picked up by WA.

Plugin Basics
--------------

This section contains reference information common to plugins of all types.

.. _context:

The Context
~~~~~~~~~~~

.. note:: For clarification on the meaning of "workload specification",
          "spec", "job" and "workload", and the distinction between them,
          please see the :ref:`glossary `.

The majority of methods in plugins accept a context argument. This is an
instance of :class:`wa.framework.execution.ExecutionContext`. It contains
information about the current state of execution of WA and keeps track of
things like which workload is currently running.

Notable methods of the context are:

:context.get_resource(resource, strict=True): This method should be used to
    retrieve a resource using the resource getters, rather than using the
    ResourceResolver directly, as this method additionally records the hash of
    any found resource in the output metadata.

:context.add_artifact(name, host_file_path, kind, description=None, classifier=None):
    Plugins can add :ref:`artifacts ` of various kinds to the run output
    directory for WA and associate them with a description and/or
    :ref:`classifier `.

:context.add_metric(name, value, units=None, lower_is_better=False, classifiers=None):
    This method should be used to add :ref:`metrics ` that have been generated
    from a workload; this will allow WA to process the results accordingly,
    depending on which output processors are enabled.

Notable attributes of the context are:

:context.workload: The :class:`wa.framework.workload` object that is currently
    being executed.

:context.tm: This is the target manager that can be used to access various
    information about the target, including initialization parameters.

:context.current_job: This is an instance of :class:`wa.framework.job.Job` and
    contains all the information relevant to the workload job currently being
    executed.

:context.current_job.spec: The current workload specification being executed.
    This is an instance of :class:`wa.framework.configuration.core.JobSpec`
    and defines the workload and the parameters under which it is being
    executed.

:context.current_job.current_iteration: The current iteration of the spec that
    is being executed. Note that this is the iteration for that spec, i.e. the
    number of times that spec has been run, *not* the total number of all
    iterations that have been executed so far.

:context.job_output: This is the output object for the current iteration,
    which is an instance of :class:`wa.framework.output.JobOutput`. It
    contains the status of the iteration as well as the metrics and artifacts
    generated by the job.

In addition to these, the context also defines a few useful paths (see below).

Paths
~~~~~

You should avoid using hard-coded absolute paths in your plugins whenever
possible, as they make your code too dependent on a particular environment and
may mean having to make adjustments when moving to new (host and/or device)
platforms.
To help avoid hard-coded absolute paths, WA defines a number of standard
locations. You should strive to define your paths relative to one of these.

On the host
^^^^^^^^^^^

Host paths are available through the context object, which is passed to most
plugin methods.

context.run_output_directory
    This is the top-level output directory for all WA results (by default,
    this will be "wa_output" in the directory in which WA was invoked).

context.output_directory
    This is the output directory for the current iteration. This will be an
    iteration-specific subdirectory under the main results location. If there
    is no current iteration (e.g. when processing overall run results), this
    will point to the same location as ``run_output_directory``.

Additionally, the global ``wa.settings`` object exposes one other location:

settings.dependency_directory
    This is the root directory for all plugin dependencies (e.g. media files,
    assets etc.) that are not included within the plugin itself.

As per Python best practice, it is recommended that the methods and values in
the ``os.path`` standard library module are used for host path manipulation.

On the target
^^^^^^^^^^^^^

Workloads and instruments have a ``target`` attribute, which is an interface
to the target used by WA. It defines the following location:

target.working_directory
    This is the directory for all WA-related files on the target. All files
    deployed to the target should be pushed to somewhere under this location
    (the only exception being executables installed with the
    ``target.install`` method).

Since there could be a mismatch between the path notation used by the host and
the target, the ``os.path`` module should *not* be used for on-target path
manipulation. Instead, the target has an equivalent module exposed through the
``target.path`` attribute. This has all the same attributes and behaves the
same way as ``os.path``, but is guaranteed to produce valid paths for the
target, irrespective of the host's path notation. For example:

.. code:: python

    result_file = self.target.path.join(self.target.working_directory,
                                        "result.txt")
    self.command = "{} -a -b -c {}".format(target_binary, result_file)

.. note:: Output processors, unlike workloads and instruments, do not have
          their own target attribute, as they are designed to be able to be
          run offline.

.. _plugin-parameters:

Parameters
~~~~~~~~~~~

All plugins can be parametrized. Parameters are specified using the
``parameters`` class attribute. This should be a list of
:class:`wa.framework.plugin.Parameter` instances. The following attributes can
be specified on parameter creation:

:name: This is the only mandatory argument. The name will be used to create a
    corresponding attribute in the plugin instance, so it must be a valid
    Python identifier.

:kind: This is the type of the value of the parameter. This must be a
    callable. Normally, this should be a standard Python type, e.g. ``int`` or
    ``float``, or one of the types defined in :mod:`wa.utils.types`. If not
    explicitly specified, this will default to ``str``.

    .. note:: Irrespective of the ``kind`` specified, ``None`` is always a
              valid value for a parameter. If you don't want to allow
              ``None``, then set ``mandatory`` (see below) to ``True``.

:allowed_values: A list of the only allowed values for this parameter.

    .. note:: For composite types, such as ``list_of_strings`` or
              ``list_of_ints`` in :mod:`wa.utils.types`, each element of the
              value will be checked against ``allowed_values``, rather than
              the composite value itself.
:default: The default value to be used for this parameter if one has not been
   specified by the user. Defaults to ``None``.

:mandatory: A ``bool`` indicating whether this parameter is mandatory. Setting
   this to ``True`` will make ``None`` an illegal value for the parameter.
   Defaults to ``False``.

   .. note:: Specifying a ``default`` will mean that ``mandatory`` will,
             effectively, be ignored (unless the user explicitly sets the
             param to ``None``).

   .. note:: Mandatory parameters are *bad*. If at all possible, you should
             strive to provide a sensible ``default`` or to make do without
             the parameter. Only when the param is absolutely necessary, and
             there really is no sensible default that could be given (e.g.
             something like login credentials), should you consider making it
             mandatory.

:constraint: This is an additional constraint to be enforced on the parameter
   beyond its type or fixed allowed values set. This should be a predicate (a
   function that takes a single argument -- the user-supplied value -- and
   returns a ``bool`` indicating whether the constraint has been satisfied).

:override: A parameter name must be unique not only within a plugin but also
   within that plugin's class hierarchy. If you try to declare a parameter
   with the same name as one that already exists, you will get an error. If
   you do want to override a parameter from further up in the inheritance
   hierarchy, you can indicate that by setting the ``override`` attribute to
   ``True``. When overriding, you do not need to specify every other attribute
   of the parameter, just the ones you want to override. Values for the rest
   will be taken from the parameter in the base class.

Validation and cross-parameter constraints
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A plugin will get validated at some point after construction. When exactly
this occurs depends on the plugin type, but it *will* be validated before it
is used.

You can implement a ``validate`` method in your plugin (that takes no
arguments beyond ``self``) to perform any additional *internal* validation in
your plugin. By "internal", I mean that you cannot make assumptions about the
surrounding environment (e.g. that the device has been initialized).

The contract for the ``validate`` method is that it should raise an exception
(either ``wa.framework.exception.ConfigError`` or a plugin-specific exception
type -- see further on this page) if some validation condition has not been,
and cannot be, met. If the method returns without raising an exception, then
the plugin is in a valid internal state.

Note that ``validate`` can be used not only to verify, but also to impose a
valid internal state. In particular, this is where cross-parameter constraints
can be resolved. If the ``default`` or ``allowed_values`` of one parameter
depend on another parameter, there is no way to express that declaratively
when specifying the parameters. In that case the dependent attribute should be
left unspecified on creation and should instead be set inside ``validate``.

Logging
~~~~~~~

Every plugin class has its own logger that you can access through
``self.logger`` inside the plugin's methods. Generally, a :class:`Target` will
log everything it is doing, so you shouldn't need to add much additional
logging for device actions. However, you might want to log additional
information, e.g. what settings your plugin is using, what it is doing on the
host, etc. (Operations on the host will not normally be logged, so your plugin
should definitely log what it is doing on the host).
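
For example, a method that installs a binary on the target might record its
host-side actions like this (a minimal sketch; the attribute names are
illustrative):

.. code-block:: python

    def initialize(self, context):
        # Host-side operations are not logged automatically, so log them
        # explicitly; target-side actions are already logged by the Target.
        self.logger.debug('Resolved binary to {}'.format(self.binary_file))
        self.logger.info('Installing {} onto the target'.format(self.binary_name))
        self.trace_on_target = self.target.install(self.binary_file)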

One situation in particular where you should add logging is before doing
something that might take a significant amount of time, such as downloading a
file.

Documenting
~~~~~~~~~~~

All plugins and their parameters should be documented. For plugins themselves,
this is done through the ``description`` class attribute. The convention for a
plugin description is that the first paragraph should be a short summary
description of what the plugin does and why one would want to use it (among
other things, this will get extracted and used by the ``wa list`` command).
Subsequent paragraphs (separated by blank lines) can then provide a more
detailed description, including any limitations and setup instructions.

For parameters, the description is passed as an argument on creation. Please
note that if ``default``, ``allowed_values``, or ``constraint`` are set in the
parameter, they do not need to be explicitly mentioned in the description
(WA's documentation utilities will automatically pull those). If the
``default`` is set in ``validate`` or additional cross-parameter constraints
exist, this *should* be documented in the parameter description.

Both plugins and their parameters should be documented using reStructuredText
markup (the standard markup for Python documentation). See:

http://docutils.sourceforge.net/rst.html

Aside from that, it is up to you how you document your plugin. You should try
to provide enough information so that someone unfamiliar with your plugin is
able to use it, e.g. you should document all settings and parameters your
plugin expects (including what the valid values are).

Error Notification
~~~~~~~~~~~~~~~~~~

When you detect an error condition, you should raise an appropriate exception
to notify the user. The exception would typically be :class:`ConfigError` or
(depending on the type of the plugin)
:class:`WorkloadError`/:class:`DeviceError`/:class:`InstrumentError`/:class:`OutputProcessorError`.
All these errors are defined in the :mod:`wa.framework.exception` module.

A :class:`ConfigError` should be raised where there is a problem in the
configuration specified by the user (either through the agenda or config
files). These errors are meant to be resolvable by simple adjustments to the
configuration (and the error message should suggest what adjustments need to
be made). For all other errors, such as missing dependencies, a mis-configured
environment, problems performing operations, etc., the plugin type-specific
exceptions should be used.

If the plugin itself is capable of recovering from the error and carrying on,
it may make more sense to log an ERROR or WARNING level message using the
plugin's logger and to continue operation.

.. _metrics:

Metrics
~~~~~~~

This is what WA uses to store a single metric collected from executing a
workload.

:name: The name of the metric. Uniquely identifies the metric within the
   results.
:value: The numerical value of the metric for this execution of a workload.
   This can be either an int or a float.
:units: Units for the collected value. Can be None if the value has no units
   (e.g. it's a count or a standardised score).
:lower_is_better: Boolean flag indicating whether lower values are better than
   higher ones. Defaults to False.
:classifiers: A set of key-value pairs to further classify this metric beyond
   the current iteration (e.g. this can be used to identify sub-tests).

Metrics can be added to WA output via the :ref:`context <context>`:

.. code-block:: python

    context.add_metric("score", 9001)
    context.add_metric("time", 2.35, "seconds", lower_is_better=True)

You only need to specify the name and the value for the metric. Units and
classifiers are optional, and, if not specified otherwise, it will be assumed
that higher values are better (``lower_is_better=False``).

The metric will be added to the result for the current job, if there is one;
otherwise, it will be added to the overall run result.

.. _artifact:

Artifacts
~~~~~~~~~

This is an artifact generated during execution/post-processing of a workload.
Unlike :ref:`metrics <metrics>`, this represents an actual generated artifact,
such as a file. This may be "output", such as a trace, or it could be
"metadata", such as logs. These are distinguished using the ``kind``
attribute, which also helps WA decide how it should be handled. Currently
supported kinds are:

:log: A log file. Not part of the "output" as such, but contains information
   about the run/workload execution that may be useful for diagnostics/meta
   analysis.
:meta: A file containing metadata. This is not part of the "output", but
   contains information that may be necessary to reproduce the results
   (contrast with ``log`` artifacts, which are *not* necessary).
:data: This file contains new data, not available otherwise, and should be
   considered part of the "output" generated by WA. Most traces would fall
   into this category.
:export: Exported version of results or some other artifact. This signifies
   that this artifact does not contain any new data that is not available
   elsewhere, and that it may be safely discarded without losing information.
:raw: Signifies that this is a raw dump/log that is normally processed to
   extract useful information and is then discarded. In a sense, it is the
   opposite of ``export``, but in general may also be discarded.

   .. note:: Whether a file is marked as ``log``/``data`` or ``raw`` depends
             on how important it is to preserve this file, e.g. when
             archiving, vs. how much space it takes up. Unlike ``export``
             artifacts, which are (almost) always ignored by other exporters
             as that would never result in data loss, ``raw`` files *may* be
             processed by exporters if they decide that the risk of losing
             potentially (though unlikely) useful data is greater than the
             time/space cost of handling the artifact (e.g. a database
             uploader may choose to ignore ``raw`` artifacts, whereas a
             network filer archiver may choose to archive them).

.. note:: The ``kind`` parameter is intended to represent the logical function
          of a particular artifact, not its intended means of processing --
          this is left entirely up to the output processors.

As with :ref:`metrics`, artifacts are added via the :ref:`context <context>`:

.. code-block:: python

    context.add_artifact("benchmark-output", "bench-out.txt", kind="raw",
                         description="stdout from running the benchmark")

.. note:: The file *must* exist on the host by the point at which the artifact
          is added, otherwise an error will be raised.

The artifact will be added to the result of the current job, if there is one;
otherwise, it will be added to the overall run result. In some situations, you
may wish to add an artifact to the overall run while inside a job context;
this can be done with ``add_run_artifact``:

.. code-block:: python

    context.add_run_artifact("score-summary", "scores.txt", kind="export",
                             description="""
                             Summary of the scores so far. Updated after
                             every job.
""") In this case, you also need to make sure that the file represented by the artifact is written to the output directory for the run and not the current job. .. _metadata: Metadata ~~~~~~~~ There may be additional data collected by your plugin that you want to record as part of the result, but that does not fall under the definition of a "metric". For example, you may want to record the version of the binary you're executing. You can do this by adding a metadata entry: .. code-block:: python context.add_metadata("exe-version", 1.3) Metadata will be added either to the current job result, or to the run result, depending on the current context. Metadata values can be scalars or nested structures of dicts/sequences; the only constraint is that all constituent objects of the value must be POD (Plain Old Data) types -- see :ref:`WA POD types `. There is special support for handling metadata entries that are dicts of values. The following call adds a metadata entry ``"versions"`` who's value is ``{"my_exe": 1.3}``: .. code-block:: python context.add_metadata("versions", "my_exe", 1.3) If you attempt to add a metadata entry that already exists, an error will be raised, unless ``force=True`` is specified, in which case, it will be overwritten. Updating an existing entry whose value is a collection can be done with ``update_metadata``: .. code-block:: python context.update_metadata("ran_apps", "my_exe") context.update_metadata("versions", "my_other_exe", "2.3.0") The first call appends ``"my_exe"`` to the list at metadata entry ``"ran_apps"``. The second call updates the ``"versions"`` dict in the metadata with an entry for ``"my_other_exe"``. If an entry does not exit, ``update_metadata`` will create it, so it's recommended to always use that for non-scalar entries, unless the intention is specifically to ensure that the entry does not exist at the time of the call. .. _classifiers: Classifiers ~~~~~~~~~~~ Classifiers are key-value pairs of tags that can be attached to metrics, artifacts, jobs, or the entire run. Run and job classifiers get propagated to metrics and artifacts. Classifier keys should be strings, and their values should be simple scalars (i.e. strings, numbers, or bools). Classifiers can be thought of as "tags" that are used to annotate metrics and artifacts, in order to make it easier to sort through them later. WA itself does not do anything with them, however output processors will augment the output they generate with them (for example, ``csv`` processor can add additional columns for classifier keys). Classifiers are typically added by the user to attach some domain-specific information (e.g. experiment configuration identifier) to the results, see :ref:`using classifiers `. However, plugins can also attach additional classifiers, by specifying them in ``add_metric()`` and ``add_artifacts()`` calls. Metadata vs Classifiers ~~~~~~~~~~~~~~~~~~~~~~~ Both metadata and classifiers are sets of essentially opaque key-value pairs that get included in WA output. While they may seem somewhat similar and interchangeable, they serve different purposes and are handled differently by the framework. Classifiers are used to annotate generated metrics and artifacts in order to assist post-processing tools in sorting through them. Metadata is used to record additional information that is not necessary for processing the results, but that may be needed in order to reproduce them or to make sense of them in a grander context. 

These are specific differences in how they are handled:

- Classifiers are often provided by the user via the agenda (though they can
  also be added by plugins). Metadata is only created by the framework and
  plugins.
- Classifier values must be simple scalars; metadata values can be nested
  collections, such as lists or dicts.
- Classifiers are used by output processors to augment the output they
  generate; metadata typically isn't.
- Classifiers are essentially associated with the individual metrics and
  artifacts (though in the agenda they're specified at workload, section, or
  global run levels); metadata is associated with a particular job or run, and
  not with metrics or artifacts.

--------------------

.. _execution-decorators:

Execution Decorators
---------------------

The following decorators are available for use in order to control how many
times a method may be executed.

For example, if we want to ensure that, no matter how many iterations of a
particular workload are run, we only execute the initialize method for that
instance once, we would use the decorator as follows:

.. code-block:: python

    from wa.utils.exec_control import once

    @once
    def initialize(self, context):
        # Perform one time initialization e.g. installing a binary to target
        # ..

@once_per_instance
~~~~~~~~~~~~~~~~~~

The specified method will be invoked only once for every bound instance within
the environment.

@once_per_class
~~~~~~~~~~~~~~~

The specified method will be invoked only once for all instances of a class
within the environment.

@once
~~~~~

The specified method will be invoked only once within the environment.

.. warning:: If a method containing a ``super`` call is decorated, this will
             also stop propagation up the hierarchy. Unless this is the
             desired effect, any additional functionality should be
             implemented in a separate decorated method, which can then be
             called, allowing normal propagation to be retained.

--------------------

Utils
-----

Workload Automation defines a number of utilities collected under the
:mod:`wa.utils` subpackage. These utilities were created to help with the
implementation of the framework itself, but may also be useful when
implementing plugins.

--------------------

Workloads
---------

All of the workload types inherit from the same base :class:`Workload` class,
whose API can be seen in the :ref:`API ` section.

Workload methods (except for ``validate``) take a single argument that is a
:class:`wa.framework.execution.ExecutionContext` instance. This object keeps
track of the current execution state (such as the current workload, iteration
number, etc), and contains, among other things, a
:class:`wa.framework.output.JobOutput` instance that should be populated from
the ``update_output`` method with the results of the execution. For more
information please see `the context`_ documentation. ::

    # ...

    def update_output(self, context):
        # ...
        context.add_metric('energy', 23.6, 'Joules', lower_is_better=True)
        # ...

.. _workload-types:

Workload Types
~~~~~~~~~~~~~~

There are multiple workload types that you can inherit from, depending on the
purpose of your workload; the different types, along with an outline of their
intended use cases, are given below.

.. _basic-workload:

Basic (:class:`wa.Workload`)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This is the simplest type of workload; it is left to the developer to
implement its full functionality.
.. _apk-workload:

Apk (:class:`wa.ApkWorkload`)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This workload will simply deploy and launch an Android app in its basic form,
with no UI interaction.

.. _uiautomator-workload:

UiAuto (:class:`wa.UiautoWorkload`)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This workload is for Android targets; it will use UiAutomator to interact with
UI elements without a specific Android app, for example performing
manipulation of Android itself. This is the preferred type of automation, as
the results are more portable and reproducible due to being able to wait for
UI elements to appear, rather than having to rely on human recordings.

.. _apkuiautomator-workload:

ApkUiAuto (:class:`wa.ApkUiautoWorkload`)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This is the same as the UiAuto workload, however it is also associated with an
Android app, e.g. AdobeReader, and will automatically deploy and launch the
app before running the automation.

.. _revent-workload:

Revent (:class:`wa.ReventWorkload`)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Revent workloads are designed primarily for games, as these cannot be
automated with UiAutomator because they are rendered within a single UI
element. They require a recording to be performed manually and currently will
need re-recording for each different device. For more information on revent
workloads please see :ref:`revent_files_creation`.

.. _apkrevent-workload:

ApkRevent (:class:`wa.ApkReventWorkload`)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This is the same as the Revent workload, however it is also associated with an
Android app, e.g. AngryBirds, and will automatically deploy and launch the app
before running the automation.


================================================
FILE: doc/source/developer_information/developer_reference/revent.rst
================================================

Revent Recordings
=================

Convention for Naming revent Files for Revent Workloads
-------------------------------------------------------

There is a convention for naming revent files, which you should follow if you
want to record your own revent files. Each revent file must be called (case
sensitive) ``<device name>.<stage>.revent``, where ``<device name>`` is the
name of your device (as defined by the model name of your device, which can be
retrieved with ``adb shell getprop ro.product.model``, or by the ``name``
attribute of your customized device class), and ``<stage>`` is one of the
following currently supported stages:

:setup: This stage is where the application is loaded (if present). It is a
   good place to record any actions needed to get ready for the main part of
   the workload to start.
:run: This stage is where the main work of the workload should be performed.
   This will allow for more accurate results if the revent file for this stage
   only records the main actions under test.
:extract_results: This stage is used after the workload has been completed to
   retrieve any metrics from the workload, e.g. a score.
:teardown: This stage is where any final actions should be performed to clean
   up the workload.

Only the run stage is mandatory; the remaining stages will be replayed if a
recording is present, otherwise no actions will be performed for that
particular stage.

All your custom revent files should reside at
``'$WA_USER_DIRECTORY/dependencies/WORKLOAD NAME/'``. So, typically, to add
custom revent files for a device named "mydevice" and a workload named
"myworkload", you would need to add the revent files to the directory
``~/.workload_automation/dependencies/myworkload/revent_files``, creating the
directory structure if necessary. ::

    mydevice.setup.revent
    mydevice.run.revent
    mydevice.extract_results.revent
    mydevice.teardown.revent

Any revent file in the dependencies will always take precedence over the
revent file in the workload directory. So, for example, it is possible to just
provide one revent for setup in the dependencies and use the ``run.revent``
that is in the workload directory.

File format of revent recordings
--------------------------------

You do not need to understand the recording format in order to use revent.
This section is intended for those looking to extend revent in some way, or to
utilize revent recordings for other purposes.

Format Overview
~~~~~~~~~~~~~~~

Recordings are stored in a binary format. A recording consists of three
sections::

    +-+-+-+-+-+-+-+-+-+-+-+
    |       Header        |
    +-+-+-+-+-+-+-+-+-+-+-+
    |                     |
    | Device Description  |
    |                     |
    +-+-+-+-+-+-+-+-+-+-+-+
    |                     |
    |                     |
    |    Event Stream     |
    |                     |
    |                     |
    +-+-+-+-+-+-+-+-+-+-+-+

The header contains metadata describing the recording. The device description
contains information about the input devices involved in this recording.
Finally, the event stream contains the recorded input events.

All fields are either fixed size, or prefixed with their length or the number
of (fixed-sized) elements.

.. note:: All values below are little endian.

Recording Header
~~~~~~~~~~~~~~~~

An revent recording header has the following structure:

* It starts with the "magic" string ``REVENT`` to indicate that this is an
  revent recording.
* The magic is followed by a 16 bit version number. This indicates the format
  version of the recording that follows. The current version is ``2``.
* The next 16 bits indicate the type of the recording. This dictates the
  structure of the Device Description section. Valid values are:

  ``0``
      This is a general input event recording. The device description contains
      a list of paths from which the events were recorded.
  ``1``
      This is a gamepad recording. The device description contains the
      description of the gamepad used to create the recording.

* The header is zero-padded to 128 bits.

::

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |      'R'      |      'E'      |      'V'      |      'E'      |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |      'N'      |      'T'      |            Version            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |             Mode              |            PADDING            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                            PADDING                            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

Device Description
~~~~~~~~~~~~~~~~~~

This section describes the input devices used in the recording. Its structure
is determined by the value of the ``Mode`` field in the header.

General Recording
~~~~~~~~~~~~~~~~~

.. note:: This is the only format supported prior to version ``2``.

The recording has been made from all available input devices. This section
contains the list of ``/dev/input`` paths for the devices, prefixed with the
total number of devices recorded.

::

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                       Number of devices                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    |                          Device paths                         |
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

Similarly, each device path is a length-prefixed string. Unlike C strings, the
path is *not* NULL-terminated. ::

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                     Length of device path                     |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    |                          Device path                          |
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

Gamepad Recording
~~~~~~~~~~~~~~~~~

The recording has been made from a specific gamepad. All events in the stream
will be for that device only. The section describes the device properties that
will be used to create a virtual input device using ``/dev/uinput``. Please
see the ``linux/input.h`` header in the Linux kernel source for more
information about the fields in this section.

::

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |            bustype            |            vendor             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |            product            |            version            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                          name_length                          |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    |                             name                              |
    |                                                               |
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                            ev_bits                            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    |                                                               |
    |                      key_bits (96 bytes)                      |
    |                                                               |
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    |                                                               |
    |                      rel_bits (96 bytes)                      |
    |                                                               |
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    |                                                               |
    |                      abs_bits (96 bytes)                      |
    |                                                               |
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                          num_absinfo                          |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    |                                                               |
    |                                                               |
    |                                                               |
    |                        absinfo entries                        |
    |                                                               |
    |                                                               |
    |                                                               |
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

Each ``absinfo`` entry consists of six 32 bit values. The number of entries is
determined by the ``abs_bits`` field.

::

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                             value                             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                            minimum                            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                            maximum                            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                             fuzz                              |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                             flat                              |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                          resolution                           |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

Event Stream
~~~~~~~~~~~~

The majority of an revent recording will be made up of the input events that
were recorded. The event stream is prefixed with the number of events in the
stream, and the start and end times for the recording.

::

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                        Number of events                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                    Number of events (cont.)                   |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                       Start Time Seconds                      |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                   Start Time Seconds (cont.)                  |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                    Start Time Microseconds                    |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                Start Time Microseconds (cont.)                |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                        End Time Seconds                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                    End Time Seconds (cont.)                   |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                     End Time Microseconds                     |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                 End Time Microseconds (cont.)                 |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    |                                                               |
    |                             Events                            |
    |                                                               |
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

Event Structure
~~~~~~~~~~~~~~~

Each event entry is structured as follows:

* An unsigned short integer representing which device, from the list of device
  paths, this event is for (zero indexed). E.g. Device ID = 3 would be the 4th
  device in the list of device paths.
* An unsigned long integer representing the number of seconds since "epoch"
  when the event was recorded.
* An unsigned long integer representing the microseconds part of the
  timestamp.
* An unsigned integer representing the event type.
* An unsigned integer representing the event code.
* An unsigned integer representing the event value.

For more information about the event type, code and value please read:
https://www.kernel.org/doc/Documentation/input/event-codes.txt

::

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |           Device ID           |       Timestamp Seconds       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                   Timestamp Seconds (cont.)                   |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |   Timestamp Seconds (cont.)   |    Timestamp Microseconds     |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                 Timestamp Microseconds (cont.)                |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    | Timestamp Microseconds (cont.)|          Event Type           |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |          Event Code           |          Event Value          |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |      Event Value (cont.)      |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

Parser
~~~~~~

WA has a parser for revent recordings. This can be used to work with revent
recordings in scripts. Here is an example:

.. code:: python

    from wa.utils.revent import ReventRecording

    with ReventRecording('/path/to/recording.revent') as recording:
        print("Recording: {}".format(recording.filepath))
        print("There are {} input events".format(recording.num_events))
        print("Over a total of {} seconds".format(recording.duration))


================================================
FILE: doc/source/developer_information/developer_reference/serialization.rst
================================================

.. _serialization:

Serialization
=============

Overview of Serialization
-------------------------

WA employs a serialization mechanism in order to store some of its internal
structures inside the output directory. Serialization is performed in two
stages:

1. A serializable object is converted into a POD (Plain Old Data) structure
   consisting of primitive Python types, and a few additional types (see
   :ref:`wa-pods` below).
2. The POD structure is serialized into a particular format by a generic
   serializer for that format. Currently, ``yaml`` and ``json`` are supported.

Deserialization works in reverse order -- first the serialized text is parsed
into a POD, which is then converted to the appropriate object.

Implementing Serializable Objects
---------------------------------

In order to be considered serializable, an object must either be a POD, or it
must implement the ``to_pod()`` method and the ``from_pod`` static/class
method, which will perform the conversion to/from POD. As an example, below is
a (somewhat trimmed) implementation of the ``Event`` class:

.. code-block:: python

    class Event(object):

        @staticmethod
        def from_pod(pod):
            instance = Event(pod['message'])
            instance.timestamp = pod['timestamp']
            return instance

        def __init__(self, message):
            self.timestamp = datetime.utcnow()
            self.message = message

        def to_pod(self):
            return dict(
                timestamp=self.timestamp,
                message=self.message,
            )

Serialization API
-----------------

.. function:: read_pod(source, fmt=None)
.. function:: write_pod(pod, dest, fmt=None)

These read and write PODs from a file. The format will be inferred, if
possible, from the extension of the file, or it may be specified explicitly
with ``fmt``. ``source`` and ``dest`` can be either strings, in which case
they will be interpreted as paths, or they can be file-like objects.

.. function:: is_pod(obj)

Returns ``True`` if ``obj`` is a POD, and ``False`` otherwise.

.. function:: dump(o, wfh, fmt='json', \*args, \*\*kwargs)
.. function:: load(s, fmt='json', \*args, \*\*kwargs)

These implement an alternative serialization interface, which matches the
interface exposed by the parsers for the supported formats.

.. _wa-pods:

WA POD Types
------------

POD types are types that can be handled by a serializer directly, without a
need for any additional information. These consist of the built-in Python
types ::

    list
    tuple
    dict
    set
    str
    unicode
    int
    float
    bool

...the standard library types ::

    OrderedDict
    datetime

...and the WA-defined types ::

    regex_type
    none_type
    level
    cpu_mask

Any structure consisting entirely of these types is a POD and can be
serialized and then deserialized without losing information. It is important
to note that only these specific types are considered POD; their subclasses
are *not*.

.. note:: ``dict``\ s get deserialized as ``OrderedDict``\ s.

Serialization Formats
---------------------

WA utilizes two serialization formats: YAML and JSON. YAML is used for files
intended to be primarily written and/or read by humans; JSON is used for files
intended to be primarily written and/or read by WA and other programs.

The parsers and serializers for these formats used by WA have been modified to
handle additional types (e.g. regular expressions) that are typically not
supported by the formats. This was done in such a way that the resulting files
are still valid and can be parsed by any parser for that format.


================================================
FILE: doc/source/developer_information/developer_reference.rst
================================================

.. _developer_reference:

********************
Developer Reference
********************

.. contents::
   :depth: 3
   :local:

.. include:: developer_information/developer_reference/framework_overview.rst

-----------------

.. include:: developer_information/developer_reference/plugins.rst

-----------------

.. include:: developer_information/developer_reference/revent.rst

-----------------

.. include:: developer_information/developer_reference/serialization.rst

-----------------

.. include:: developer_information/developer_reference/contributing.rst


================================================
FILE: doc/source/developer_information/how_to.rst
================================================

*******
How Tos
*******

.. contents:: Contents
   :depth: 4
   :local:

.. include:: developer_information/how_tos/adding_plugins.rst

.. include:: developer_information/how_tos/processing_output.rst


================================================
FILE: doc/source/developer_information/how_tos/adding_plugins.rst
================================================

.. _deploying-executables-example:

Deploying Executables
=====================

Installing binaries for a particular plugin should generally only be performed
once during a run. This should typically be done in the ``initialize`` method.
If the only functionality performed in the method is to install the required
binaries, then the ``initialize`` method should be decorated with the
``@once`` :ref:`decorator <execution-decorators>`; otherwise the installation
should be placed into a dedicated method which is decorated instead. Please
note that, if doing this, any installed paths should be added as class
attributes rather than instance variables. As a general rule, if binaries are
installed as part of ``initialize`` then they should be uninstalled in the
complementary ``finalize`` method.

Part of an example workload demonstrating this is shown below:

.. code:: python

    from wa import Workload, Executable
    from wa.utils.exec_control import once

    class MyWorkload(Workload):
        # ..
        @once
        def initialize(self, context):
            resource = Executable(self, self.target.abi, 'my_executable')
            host_binary = context.resolver.get(resource)
            MyWorkload.target_binary = self.target.install(host_binary)
        # ..

        def setup(self, context):
            self.command = "{} -a -b -c".format(self.target_binary)
            self.target.execute(self.command)
        # ..

        @once
        def finalize(self, context):
            self.target.uninstall('my_executable')

.. _adding-a-workload-example:

Adding a Workload
=================

The easiest way to create a new workload is to use the :ref:`create ` command:
``wa create workload <workload name>``. This will use predefined templates to
create a workload, based on the options that are supplied, to be used as a
starting point for the workload. For more information on using the create
workload command see ``wa create workload -h``.

The first thing to decide is the type of workload you want to create,
depending on the OS you will be using and the aim of the workload. There are
currently six available workload types to choose from, as detailed in the
:ref:`Developer Reference <workload-types>`.

Once you have decided what type of workload you wish to create, this can be
specified with ``-k <workload kind>`` followed by the workload name. This will
automatically generate a workload in your ``WA_CONFIG_DIR/plugins``. If you
wish to specify a custom location, this can be provided with ``-p <path>``.

A typical invocation of the :ref:`create ` command would be in the form::

    wa create workload -k <workload kind> <workload name>

.. _adding-a-basic-workload-example:

Adding a Basic Workload
-----------------------

To add a ``basic`` workload template for our example workload, we can simply
use the command::

    wa create workload -k basic ziptest

This will generate a very basic workload with dummy methods for each method in
the workload interface, and it is left to the developer to add any required
functionality.

Not all the methods from the interface are required to be implemented; this
example shows how a subset might be used to implement a simple workload that
times how long it takes to compress a file of a particular size on the device.

.. note:: This is intended as an example of how to implement the Workload
          :ref:`interface `. The methodology used to perform the actual
          measurement is not necessarily sound, and this Workload should not
          be used to collect real measurements.

The first step is to subclass our desired :ref:`workload type
<workload-types>` depending on the purpose of our workload. In this example we
are implementing a very simple workload and do not require any additional
features, so we shall inherit directly from the base :class:`Workload` class.
We then need to provide a ``name`` for our workload, which is what will be
used to identify it, for example in an agenda or via the show command; if you
used the ``create`` command this will already be populated for you.

.. code-block:: python

    import os
    from wa import Workload, Parameter

    class ZipTestWorkload(Workload):

        name = 'ziptest'

The ``description`` attribute should be a string structured as a short summary
of the purpose of the workload, which will be shown when using the :ref:`list
command `, followed by a more in-depth explanation separated by a new line.

.. code-block:: python

        description = '''
                      Times how long it takes to gzip a file of a particular
                      size on a device.

                      This workload was created for illustration purposes
                      only. It should not be used to collect actual
                      measurements.
                      '''

In order to allow for additional configuration of the workload by a user, a
list of :ref:`parameters <plugin-parameters>` can be supplied. These can be
configured in a variety of different ways. For example, here we are ensuring
that the value of the parameter is an integer larger than 0, using the
``kind`` and ``constraint`` options; also, if no value is provided, we are
supplying a ``default`` value of 2000000. These parameters will automatically
have their value set as an attribute of the workload, so later on we will be
able to use the value provided here as ``self.file_size``.

.. code-block:: python

        parameters = [
            Parameter('file_size', kind=int, default=2000000,
                      constraint=lambda x: 0 < x,
                      description='Size of the file (in bytes) to be gzipped.')
        ]

Next we will implement our ``setup`` method. This is where we do any
preparation that is required before the workload is run; this usually involves
things like setting up required files on the device and generating commands
from user input. In this case we will generate our input file on the host
system and then push it to a known location on the target for use in the
'run' stage.

.. code-block:: python

        def setup(self, context):
            super(ZipTestWorkload, self).setup(context)
            # Generate a file of the specified size containing random garbage.
            host_infile = os.path.join(context.output_directory, 'infile')
            command = 'openssl rand -base64 {} > {}'.format(self.file_size, host_infile)
            os.system(command)
            # Set up on-device paths
            devpath = self.target.path  # os.path equivalent for the target
            self.target_infile = devpath.join(self.target.working_directory, 'infile')
            self.target_outfile = devpath.join(self.target.working_directory, 'outfile')
            # Push the file to the target
            self.target.push(host_infile, self.target_infile)

The ``run`` method is where the actual 'work' of the workload takes place, and
is what is measured by any instrumentation. So, for this example, this is the
execution of creating the zip file on the target.

.. code-block:: python

        def run(self, context):
            cmd = 'cd {} && (time gzip {}) &>> {}'
            self.target.execute(cmd.format(self.target.working_directory,
                                           self.target_infile,
                                           self.target_outfile))

The ``extract_results`` method is used to extract any results from the target.
For example, here we want to pull the file containing the timing information
that we will use to generate metrics for our workload, and we then add this
file as an artifact with a ``raw`` kind, which means that once WA has finished
processing it is allowed to decide whether to keep the file or not.

.. code-block:: python

        def extract_results(self, context):
            super(ZipTestWorkload, self).extract_results(context)
            # Pull the results file to the host
            self.host_outfile = os.path.join(context.output_directory, 'timing_results')
            self.target.pull(self.target_outfile, self.host_outfile)
            context.add_artifact('ziptest-results', self.host_outfile, kind='raw')

In the ``update_output`` method we can do any generation of metrics that we
wish for our workload. In this case we are going to simply convert the times
reported into seconds and add them as 'metrics' to WA, which can then be
displayed to the user along with any others, in a format dependent on which
output processors they have enabled for the run.

.. code-block:: python

        def update_output(self, context):
            super(ZipTestWorkload, self).update_output(context)
            # Extract metrics from the file's contents and update the result
            # with them.
            content = iter(open(self.host_outfile).read().strip().split())
            for value, metric in zip(content, content):
                mins, secs = map(float, value[:-1].split('m'))
                context.add_metric(metric, secs + 60 * mins, 'seconds')

Finally, in the ``teardown`` method we will perform any required clean up for
the workload, so we will delete the input and output files from the device.

.. code-block:: python

        def teardown(self, context):
            super(ZipTestWorkload, self).teardown(context)
            self.target.remove(self.target_infile)
            self.target.remove(self.target_outfile)

The full implementation of this workload would look something like:

.. code-block:: python

    import os
    from wa import Workload, Parameter

    class ZipTestWorkload(Workload):

        name = 'ziptest'

        description = '''
                      Times how long it takes to gzip a file of a particular
                      size on a device.

                      This workload was created for illustration purposes
                      only. It should not be used to collect actual
                      measurements.
                      '''

        parameters = [
            Parameter('file_size', kind=int, default=2000000,
                      constraint=lambda x: 0 < x,
                      description='Size of the file (in bytes) to be gzipped.')
        ]

        def setup(self, context):
            super(ZipTestWorkload, self).setup(context)
            # Generate a file of the specified size containing random garbage.
            host_infile = os.path.join(context.output_directory, 'infile')
            command = 'openssl rand -base64 {} > {}'.format(self.file_size, host_infile)
            os.system(command)
            # Set up on-device paths
            devpath = self.target.path  # os.path equivalent for the target
            self.target_infile = devpath.join(self.target.working_directory, 'infile')
            self.target_outfile = devpath.join(self.target.working_directory, 'outfile')
            # Push the file to the target
            self.target.push(host_infile, self.target_infile)

        def run(self, context):
            cmd = 'cd {} && (time gzip {}) &>> {}'
            self.target.execute(cmd.format(self.target.working_directory,
                                           self.target_infile,
                                           self.target_outfile))

        def extract_results(self, context):
            super(ZipTestWorkload, self).extract_results(context)
            # Pull the results file to the host
            self.host_outfile = os.path.join(context.output_directory, 'timing_results')
            self.target.pull(self.target_outfile, self.host_outfile)
            context.add_artifact('ziptest-results', self.host_outfile, kind='raw')

        def update_output(self, context):
            super(ZipTestWorkload, self).update_output(context)
            # Extract metrics from the file's contents and update the result
            # with them.
            content = iter(open(self.host_outfile).read().strip().split())
            for value, metric in zip(content, content):
                mins, secs = map(float, value[:-1].split('m'))
                context.add_metric(metric, secs + 60 * mins, 'seconds')

        def teardown(self, context):
            super(ZipTestWorkload, self).teardown(context)
            self.target.remove(self.target_infile)
            self.target.remove(self.target_outfile)

.. _apkuiautomator-example:

Adding an ApkUiAutomator Workload
---------------------------------

If we wish to create a workload to automate the testing of the Google Docs
Android app, we would choose to perform the automation using UIAutomator, and
we would want to automatically deploy and install the apk file to the target;
therefore we would choose the :ref:`ApkUiAuto workload
<apkuiautomator-workload>` type, with the following command::

    $ wa create workload -k apkuiauto google_docs
    Workload created in $WA_USER_DIRECTORY/plugins/google_docs

From here you can navigate to the displayed directory and you will find your
``__init__.py`` and a ``uiauto`` directory. The former is your Python WA
workload and will look something like this. For an example of what should be
done in each of the main methods, please see :ref:`adding a basic example
<adding-a-basic-workload-example>` above.

.. code-block:: python

    from wa import Parameter, ApkUiautoWorkload

    class GoogleDocs(ApkUiautoWorkload):

        name = 'google_docs'
        description = "This is a placeholder description"
        # Replace with a list of supported package names in the APK file(s).
        package_names = ['package_name']

        parameters = [
            # Workload parameters go here e.g.
            Parameter('example_parameter', kind=int, allowed_values=[1,2,3],
                      default=1, override=True, mandatory=False,
                      description='This is an example parameter')
        ]

        def __init__(self, target, **kwargs):
            super(GoogleDocs, self).__init__(target, **kwargs)
            # Define any additional attributes required for the workload

        def init_resources(self, resolver):
            super(GoogleDocs, self).init_resources(resolver)
            # This method may be used to perform early resource discovery and
            # initialization. This is invoked during the initial loading stage and
            # before the device is ready, so cannot be used for any device-dependent
            # initialization. This method is invoked before the workload instance is
            # validated.

        def initialize(self, context):
            super(GoogleDocs, self).initialize(context)
            # This method should be used to perform once-per-run initialization of a
            # workload instance.
        def validate(self):
            super(GoogleDocs, self).validate()
            # Validate inter-parameter assumptions etc

        def setup(self, context):
            super(GoogleDocs, self).setup(context)
            # Perform any necessary setup before starting the UI automation

        def extract_results(self, context):
            super(GoogleDocs, self).extract_results(context)
            # Extract results on the target

        def update_output(self, context):
            super(GoogleDocs, self).update_output(context)
            # Update the output within the specified execution context with the
            # metrics and artifacts from this workload iteration.

        def teardown(self, context):
            super(GoogleDocs, self).teardown(context)
            # Perform any final clean up for the Workload.

Depending on the purpose of your workload, you can choose which methods to
implement. The main thing that needs setting is the list of ``package_names``,
which must be a list of strings containing the Android package names that will
be used during resource resolution to locate the relevant apk file for the
workload. Additionally, the workload parameters will need updating with any
relevant parameters required by the workload, as well as the description.

The latter (the ``uiauto`` directory) will contain a framework for performing
the UI automation on the target. The file you will be most interested in is
``uiauto/app/src/main/java/arm/wa/uiauto/UiAutomation.java``, which will
contain the actual code of the automation and will look something like:

.. code-block:: java

    package com.arm.wa.uiauto.google_docs;

    import android.app.Activity;
    import android.os.Bundle;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import android.support.test.runner.AndroidJUnit4;

    import android.util.Log;
    import android.view.KeyEvent;

    // Import the uiautomator libraries
    import android.support.test.uiautomator.UiObject;
    import android.support.test.uiautomator.UiObjectNotFoundException;
    import android.support.test.uiautomator.UiScrollable;
    import android.support.test.uiautomator.UiSelector;

    import org.junit.Before;
    import org.junit.Test;
    import org.junit.runner.RunWith;

    import com.arm.wa.uiauto.BaseUiAutomation;

    @RunWith(AndroidJUnit4.class)
    public class UiAutomation extends BaseUiAutomation {

        protected Bundle parameters;
        protected int example_parameter;

        public static String TAG = "google_docs";

        @Before
        public void initilize() throws Exception {
            // Perform any parameter initialization here
            parameters = getParams(); // Required to decode passed parameters.
            packageID = getPackageID(parameters);
            example_parameter = parameters.getInt("example_parameter");
        }

        @Test
        public void setup() throws Exception {
            // Optional: Perform any setup required before the main workload
            // is ran, e.g. dismissing welcome screens
        }

        @Test
        public void runWorkload() throws Exception {
            // The main UI Automation code goes here
        }

        @Test
        public void extractResults() throws Exception {
            // Optional: Extract any relevant results from the workload,
        }

        @Test
        public void teardown() throws Exception {
            // Optional: Perform any clean up for the workload
        }
    }

A few items to note from the template:

- Each of the stages of execution (for example ``setup``, ``runWorkload``,
  etc.) is decorated with the ``@Test`` decorator. This is important to allow
  these methods to be called at the appropriate time; however, any additional
  methods you may add do not require this decorator.
- The ``initialize`` method has the ``@Before`` decorator. This is there to
  ensure that this method is called before executing any of the workload
  stages, and it is therefore used to decode and initialize any parameters
  that are passed in.
- The code currently retrieves the ``example_parameter`` that was provided to
  the Python workload as an Integer; there are similar calls to retrieve
  parameters of different types, e.g. ``getString``, ``getBoolean``,
  ``getDouble`` etc.

Once you have implemented your Java workload, you can use the file
``uiauto/build.sh`` to compile your automation into an apk file to perform the
automation. The apk will be generated with the package name
``com.arm.wa.uiauto.<workload name>``, which when running your workload will
be automatically detected by the resource getters and deployed to the device.

Adding a ReventApk Workload
---------------------------

If we wish to create a workload to automate the testing of a UI-based workload
that we cannot, or do not wish to, automate with UiAutomator, then we can
perform the automation using revent. In this example we would want to
automatically deploy and install an apk file to the target, therefore we would
choose the :ref:`ApkRevent workload <apkrevent-workload>` type, with the
following command::

    $ wa create workload -k apkrevent my_game
    Workload created in $WA_USER_DIRECTORY/plugins/my_game

This will generate a revent-based workload. You will end up with a very
similar Python file to the one outlined in generating a
:ref:`UiAutomator-based workload <apkuiautomator-example>`, however without
the accompanying Java automation files. The main difference between the two is
that this workload will subclass ``ApkReventWorkload`` instead of
``ApkUiautoWorkload``, as shown below.

.. code-block:: python

    from wa import ApkReventWorkload

    class MyGame(ApkReventWorkload):

        name = 'mygame'
        package_names = ['com.mylogo.mygame']

        # ..

---------------------------------------------------------------

.. _adding-an-instrument-example:

Adding an Instrument
====================

This is an example of how we would create an instrument which will trace
device errors using a custom "trace" binary file. For more detailed
information please see the :ref:`Instrument Reference `. The first thing to do
is to create a new file under ``$WA_USER_DIRECTORY/plugins/`` and subclass
:class:`Instrument`. Make sure to overwrite the ``name`` variable with what we
want our instrument to be called, and then locate our binary for the
instrument. ::

    class TraceErrorsInstrument(Instrument):

        name = 'trace-errors'

        def __init__(self, target, **kwargs):
            super(TraceErrorsInstrument, self).__init__(target, **kwargs)
            self.binary_name = 'trace'
            self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
            self.trace_on_target = None

We then declare and implement the required methods, as detailed in the
:ref:`Instrument API `. For the ``initialize`` method, we want to install the
executable file to the target, so we can use the target's ``install`` method,
which will try to copy the file to a location on the device that supports
execution, change the file mode appropriately, and return the file path on the
target. ::

    def initialize(self, context):
        self.trace_on_target = self.target.install(self.binary_file)

Then we implement the start method, which will simply run the file to start
tracing. Supposing that the call to this binary requires some overhead to
begin collecting errors, we might want to decorate the method with the
``@slow`` decorator to try and reduce the impact on other running instruments.
For more information on prioritization please see the
:ref:`Developer Reference `.

::

    @slow
    def start(self, context):
        self.target.execute('{} start'.format(self.trace_on_target))

Lastly, we need to stop tracing once the workload stops, and this happens in
the stop method. Assuming that stopping the collection also requires some
overhead, we have again decorated the method. ::

    @slow
    def stop(self, context):
        self.target.execute('{} stop'.format(self.trace_on_target))

Once we have generated our result data, we need to retrieve it from the device
for further processing, or to add directly to WA's output for that job. For
example, for trace data we will want to pull it to the host and add it as an
:ref:`artifact <artifact>` to WA's :ref:`context <context>`. Once we have
retrieved the data, we can do any further processing and add any relevant
:ref:`Metrics <metrics>` to the :ref:`context <context>`. For this we will use
the ``add_metric`` method to add the results to the final output for that
workload. The method can be passed four params, which are the metric ``key``,
``value``, ``unit`` and ``lower_is_better``. ::

    def update_output(self, context):
        # pull the trace file from the target
        self.result = os.path.join(self.target.working_directory, 'trace.txt')
        self.outfile = os.path.join(context.output_directory, 'trace.txt')
        self.target.pull(self.result, self.outfile)
        context.add_artifact('error_trace', self.outfile, kind='export')

        # parse the file if it needs to be parsed, or add the result directly
        # to the context.

        metric = # ..
        context.add_metric('number_of_errors', metric, lower_is_better=True)

At the end of each job we might want to delete any files generated by the
instrument, and the code to clear these files goes in the ``teardown``
method. ::

    def teardown(self, context):
        self.target.remove(os.path.join(self.target.working_directory, 'trace.txt'))

At the very end of the run we would want to uninstall the binary we deployed
earlier. ::

    def finalize(self, context):
        self.target.uninstall(self.binary_name)

So the full example would look something like::

    import os

    from wa import Instrument

    class TraceErrorsInstrument(Instrument):

        name = 'trace-errors'

        def __init__(self, target, **kwargs):
            super(TraceErrorsInstrument, self).__init__(target, **kwargs)
            self.binary_name = 'trace'
            self.binary_file = os.path.join(os.path.dirname(__file__), self.binary_name)
            self.trace_on_target = None

        def initialize(self, context):
            self.trace_on_target = self.target.install(self.binary_file)

        @slow
        def start(self, context):
            self.target.execute('{} start'.format(self.trace_on_target))

        @slow
        def stop(self, context):
            self.target.execute('{} stop'.format(self.trace_on_target))

        def update_output(self, context):
            self.result = os.path.join(self.target.working_directory, 'trace.txt')
            self.outfile = os.path.join(context.output_directory, 'trace.txt')
            self.target.pull(self.result, self.outfile)
            context.add_artifact('error_trace', self.outfile, kind='export')

            metric = # ..
            context.add_metric('number_of_errors', metric, lower_is_better=True)

        def teardown(self, context):
            self.target.remove(os.path.join(self.target.working_directory, 'trace.txt'))

        def finalize(self, context):
            self.target.uninstall(self.binary_name)

.. _adding-an-output-processor-example:

Adding an Output Processor
==========================

This is an example of how we would create an output processor which will
format the run metrics as a column-aligned table. The first thing to do is to
create a new file under ``$WA_USER_DIRECTORY/plugins/`` and subclass
:class:`OutputProcessor`. Make sure to overwrite the ``name`` variable with
what we want our processor to be called, and provide a short description.
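
The resulting skeleton might start out as follows (a sketch; the full
implementation is given below):

.. code-block:: python

    from wa import OutputProcessor

    class Table(OutputProcessor):

        name = 'table'
        description = ('Generates a text file containing a column-aligned '
                       'table of run results.')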
Next we need to implement any relevant methods (please see :ref:`adding an output processor ` for all the available methods). In this case we only want to implement the ``export_run_output`` method as we are not generating any new artifacts and we only care about the overall output rather than the individual job outputs. The implementation is very simple: it just loops through all the available metrics for all the available jobs and adds them to a list which is written to a file and then added as an :ref:`artifact ` to the :ref:`context `. .. code-block:: python import os from wa import OutputProcessor from wa.utils.misc import write_table class Table(OutputProcessor): name = 'table' description = 'Generates a text file containing a column-aligned table of run results.' def export_run_output(self, output, target_info): rows = [] for job in output.jobs: for metric in job.metrics: rows.append([metric.name, str(metric.value), metric.units or '', metric.lower_is_better and '-' or '+']) outfile = output.get_path('table.txt') with open(outfile, 'w') as wfh: write_table(rows, wfh) output.add_artifact('results_table', 'table.txt', 'export') .. _adding-custom-target-example: Adding a Custom Target ====================== This is an example of how we would create a customised target. This is typically needed when we want to augment the existing functionality, for example on development boards where we need to perform additional actions to implement some functionality. In this example we are going to assume that this particular device is running Android and requires a special "wakeup" command to be sent before it can execute any other command. To add a new target to WA we will first create a new file in ``$WA_USER_DIRECTORY/plugins/example_target.py``. To facilitate creating a new target, WA provides a helper function to create a description for the specified target class and specified components. For components that are not explicitly specified it will attempt to guess sensible defaults based on the target class' bases. .. code-block:: python # Import our helper function from wa import add_description_for_target # Import the Target that our custom implementation will be based on from devlib import AndroidTarget class ExampleTarget(AndroidTarget): # Provide the name that will be used to identify your custom target name = 'example_target' # Override our custom method(s) def execute(self, *args, **kwargs): super(ExampleTarget, self).execute('wakeup', check_exit_code=False) return super(ExampleTarget, self).execute(*args, **kwargs) description = '''An Android target which requires an explicit "wakeup" command to be sent before accepting any other command''' # Call the helper function with our newly created target class and its description. add_description_for_target(ExampleTarget, description) ================================================ FILE: doc/source/developer_information/how_tos/processing_output.rst ================================================ .. _processing_output: Processing WA Output ==================== This section will illustrate the use of WA's :ref:`output processing API ` by creating a simple ASCII report generator.
To make things concrete, this how-to will be processing the output from running the following agenda:: sections: - runtime_params: frequency: min classifiers: frequency: min - runtime_params: frequency: max classifiers: frequency: max workloads: - sysbench - deepbench This runs two workloads under two different configurations each -- once with CPU frequency fixed to max, and once with CPU frequency fixed to min. Classifiers are used to indicate the configuration in the output. First, create the :class:`RunOutput` object, which is the main interface for interacting with WA outputs; alternatively, create a :class:`RunDatabaseOutput` if your results are stored in a postgres database. .. code-block:: python import sys from wa import RunOutput # Path to the output directory specified in the first argument ro = RunOutput(sys.argv[1]) Run Info -------- Next, we're going to print out an overall summary of the run. .. code-block:: python from __future__ import print_function # for Python 2 compat. from wa.utils.misc import format_duration print('-'*20) print('Run ID:', ro.info.uuid) print('Run status:', ro.status) print('Run started at:', ro.info.start_time.isoformat()) print('Run completed at:', ro.info.end_time.isoformat()) print('Run duration:', format_duration(ro.info.duration)) print('Ran', len(ro.jobs), 'jobs') print('-'*20) print() ``RunOutput.info`` is an instance of :class:`RunInfo` which encapsulates overall run metadata, such as the duration. Target Info ----------- Next, some information about the device the results were collected on. .. code-block:: python print(' Target Information ') print(' ------------------- ') print('hostname:', ro.target_info.hostname) if ro.target_info.os == 'android': print('Android ID:', ro.target_info.android_id) else: print('host ID:', ro.target_info.hostid) print('CPUs:', ', '.join(cpu.name for cpu in ro.target_info.cpus)) print() print('OS:', ro.target_info.os) print('ABI:', ro.target_info.abi) print('rooted:', ro.target_info.is_rooted) print('kernel version:', ro.target_info.kernel_version) print('os version:') for k, v in ro.target_info.os_version.items(): print('\t', k+':', v) print() print('-'*27) print() ``RunOutput.target_info`` is an instance of :class:`TargetInfo` that contains information collected from the target during the run. Jobs Summary ------------ Next, show a summary of executed jobs. .. code-block:: python from wa.utils.misc import write_table print(' Jobs ') print(' ---- ') print() rows = [] for job in ro.jobs: rows.append([job.id, job.label, job.iteration, job.status]) write_table(rows, sys.stdout, align='<<><', headers=['ID', 'LABEL', 'ITER.', 'STATUS']) print() print('-'*27) print() ``RunOutput.jobs`` is a list of :class:`JobOutput` objects. These contain information about that particular job, including its execution status, and :ref:`metrics` and :ref:`artifact` generated by the job. Compare Metrics --------------- Finally, collect the metrics and sort them by the "frequency" classifier. Classifiers that are present in the metric but not its job have been added by the workload. For the purposes of this report, they will be used to augment the metric's name. ..
code-block:: python from collections import defaultdict print() print(' Metrics Comparison ') print(' ------------------ ') print() scores = defaultdict(lambda: defaultdict(lambda: defaultdict())) for job in ro.jobs: for metric in job.metrics: workload = job.label name = metric.name freq = job.classifiers['frequency'] for cname, cval in sorted(metric.classifiers.items()): if cname not in job.classifiers: # was not propagated from the job, therefore was # added by the workload name += '/{}={}'.format(cname, cval) scores[workload][name][freq] = metric Once the metrics have been sorted, generate the report showing the delta between the two configurations (indicated by the "frequency" classifier) and highlight any unexpected deltas (based on the ``lower_is_better`` attribute of the metric). (In practice, you will want to run multiple iterations of each configuration, calculate averages and standard deviations, and only highlight statistically significant deltas.) .. code-block:: python rows = [] for workload in sorted(scores.keys()): wldata = scores[workload] for name in sorted(wldata.keys()): min_score = wldata[name]['min'].value max_score = wldata[name]['max'].value delta = max_score - min_score units = wldata[name]['min'].units or '' lib = wldata[name]['min'].lower_is_better warn = '' if (lib and delta > 0) or (not lib and delta < 0): warn = '!!!' rows.append([workload, name, '{:.3f}'.format(min_score), '{:.3f}'.format(max_score), '{:.3f}'.format(delta), units, warn]) # separate workloads with a blank row rows.append(['', '', '', '', '', '', '']) write_table(rows, sys.stdout, align='<<>>><<', headers=['WORKLOAD', 'METRIC', 'MIN.', 'MAX', 'DELTA', 'UNITS', '']) print() print('-'*27) This concludes this how-to. For more information, please see :ref:`output processing API documentation `. Complete Example ---------------- Below is the complete example code, and a report it generated for a sample run. .. code-block:: python from __future__ import print_function # for Python 2 compat.
import sys from collections import defaultdict from wa import RunOutput from wa.utils.misc import format_duration, write_table # Path to the output directory specified in the first argument ro = RunOutput(sys.argv[1]) print('-'*27) print('Run ID:', ro.info.uuid) print('Run status:', ro.status) print('Run started at:', ro.info.start_time.isoformat()) print('Run completed at:', ro.info.end_time.isoformat()) print('Run duration:', format_duration(ro.info.duration)) print('Ran', len(ro.jobs), 'jobs') print('-'*27) print() print(' Target Information ') print(' ------------------- ') print('hostname:', ro.target_info.hostname) if ro.target_info.os == 'android': print('Android ID:', ro.target_info.android_id) else: print('host ID:', ro.target_info.hostid) print('CPUs:', ', '.join(cpu.name for cpu in ro.target_info.cpus)) print() print('OS:', ro.target_info.os) print('ABI:', ro.target_info.abi) print('rooted:', ro.target_info.is_rooted) print('kernel version:', ro.target_info.kernel_version) print('OS version:') for k, v in ro.target_info.os_version.items(): print('\t', k+':', v) print() print('-'*27) print() print(' Jobs ') print(' ---- ') print() rows = [] for job in ro.jobs: rows.append([job.id, job.label, job.iteration, job.status]) write_table(rows, sys.stdout, align='<<><', headers=['ID', 'LABEL', 'ITER.', 'STATUS']) print() print('-'*27) print() print(' Metrics Comparison ') print(' ------------------ ') print() scores = defaultdict(lambda: defaultdict(lambda: defaultdict())) for job in ro.jobs: for metric in job.metrics: workload = job.label name = metric.name freq = job.classifiers['frequency'] for cname, cval in sorted(metric.classifiers.items()): if cname not in job.classifiers: # was not propagated from the job, therefore was # added by the workload name += '/{}={}'.format(cname, cval) scores[workload][name][freq] = metric rows = [] for workload in sorted(scores.keys()): wldata = scores[workload] for name in sorted(wldata.keys()): min_score = wldata[name]['min'].value max_score = wldata[name]['max'].value delta = max_score - min_score units = wldata[name]['min'].units or '' lib = wldata[name]['min'].lower_is_better warn = '' if (lib and delta > 0) or (not lib and delta < 0): warn = '!!!' rows.append([workload, name, '{:.3f}'.format(min_score), '{:.3f}'.format(max_score), '{:.3f}'.format(delta), units, warn]) # separate workloads with a blank row rows.append(['', '', '', '', '', '', '']) write_table(rows, sys.stdout, align='<<>>><<', headers=['WORKLOAD', 'METRIC', 'MIN.', 'MAX', 'DELTA', 'UNITS', '']) print() print('-'*27) Sample output:: --------------------------- Run ID: 78aef931-cd4c-429b-ac9f-61f6893312e6 Run status: OK Run started at: 2018-06-27T12:55:23.746941 Run completed at: 2018-06-27T13:04:51.067309 Run duration: 9 minutes 27 seconds Ran 4 jobs --------------------------- Target Information ------------------- hostname: localhost Android ID: b9d1d8b48cfba007 CPUs: A53, A53, A53, A53, A73, A73, A73, A73 OS: android ABI: arm64 rooted: True kernel version: 4.9.75-04208-g2c913991a83d-dirty 114 SMP PREEMPT Wed May 9 10:33:36 BST 2018 OS version: all_codenames: O base_os: codename: O incremental: eng.valsch.20170517.180115 preview_sdk: 0 release: O sdk: 25 security_patch: 2017-04-05 --------------------------- Jobs ---- ID LABEL ITER. STATUS -- ----- ----- ------ s1-wk1 sysbench 1 OK s1-wk2 deepbench 1 OK s2-wk1 sysbench 1 OK s2-wk2 deepbench 1 OK --------------------------- Metrics Comparison ------------------ WORKLOAD METRIC MIN. 
MAX DELTA UNITS -------- ------ ---- --- ----- ----- deepbench GOPS/a_t=n/b_t=n/k=1024/m=128/n=1 0.699 0.696 -0.003 !!! deepbench GOPS/a_t=n/b_t=n/k=1024/m=3072/n=1 0.471 0.715 0.244 deepbench GOPS/a_t=n/b_t=n/k=1024/m=3072/n=1500 23.514 36.432 12.918 deepbench GOPS/a_t=n/b_t=n/k=1216/m=64/n=1 0.333 0.333 -0.000 !!! deepbench GOPS/a_t=n/b_t=n/k=128/m=3072/n=1 0.405 1.073 0.668 deepbench GOPS/a_t=n/b_t=n/k=128/m=3072/n=1500 19.914 34.966 15.052 deepbench GOPS/a_t=n/b_t=n/k=128/m=4224/n=1 0.232 0.486 0.255 deepbench GOPS/a_t=n/b_t=n/k=1280/m=128/n=1500 20.721 31.654 10.933 deepbench GOPS/a_t=n/b_t=n/k=1408/m=128/n=1 0.701 0.702 0.001 deepbench GOPS/a_t=n/b_t=n/k=1408/m=176/n=1500 19.902 29.116 9.214 deepbench GOPS/a_t=n/b_t=n/k=176/m=4224/n=1500 26.030 39.550 13.519 deepbench GOPS/a_t=n/b_t=n/k=2048/m=35/n=700 10.884 23.615 12.731 deepbench GOPS/a_t=n/b_t=n/k=2048/m=5124/n=700 26.740 37.334 10.593 deepbench execution_time 318.758 220.629 -98.129 seconds !!! deepbench time (msec)/a_t=n/b_t=n/k=1024/m=128/n=1 0.375 0.377 0.002 !!! deepbench time (msec)/a_t=n/b_t=n/k=1024/m=3072/n=1 13.358 8.793 -4.565 deepbench time (msec)/a_t=n/b_t=n/k=1024/m=3072/n=1500 401.338 259.036 -142.302 deepbench time (msec)/a_t=n/b_t=n/k=1216/m=64/n=1 0.467 0.467 0.000 !!! deepbench time (msec)/a_t=n/b_t=n/k=128/m=3072/n=1 1.943 0.733 -1.210 deepbench time (msec)/a_t=n/b_t=n/k=128/m=3072/n=1500 59.237 33.737 -25.500 deepbench time (msec)/a_t=n/b_t=n/k=128/m=4224/n=1 4.666 2.224 -2.442 deepbench time (msec)/a_t=n/b_t=n/k=1280/m=128/n=1500 23.721 15.528 -8.193 deepbench time (msec)/a_t=n/b_t=n/k=1408/m=128/n=1 0.514 0.513 -0.001 deepbench time (msec)/a_t=n/b_t=n/k=1408/m=176/n=1500 37.354 25.533 -11.821 deepbench time (msec)/a_t=n/b_t=n/k=176/m=4224/n=1500 85.679 56.391 -29.288 deepbench time (msec)/a_t=n/b_t=n/k=2048/m=35/n=700 9.220 4.249 -4.970 deepbench time (msec)/a_t=n/b_t=n/k=2048/m=5124/n=700 549.413 393.517 -155.896 sysbench approx. 95 percentile 3.800 1.450 -2.350 ms sysbench execution_time 1.790 1.437 -0.353 seconds !!! sysbench response time avg 1.400 1.120 -0.280 ms sysbench response time max 40.740 42.760 2.020 ms !!! sysbench response time min 0.710 0.710 0.000 ms sysbench thread fairness events avg 1250.000 1250.000 0.000 sysbench thread fairness events stddev 772.650 213.040 -559.610 sysbench thread fairness execution time avg 1.753 1.401 -0.352 !!! sysbench thread fairness execution time stddev 0.000 0.000 0.000 sysbench total number of events 10000.000 10000.000 0.000 sysbench total time 1.761 1.409 -0.352 s --------------------------- ================================================ FILE: doc/source/developer_information.rst ================================================ ===================== Developer Information ===================== .. contents:: Contents :depth: 4 :local: ------------------ .. include:: developer_information/developer_guide.rst ------------------ .. include:: developer_information/how_to.rst ------------------ .. include:: developer_information/developer_reference.rst ================================================ FILE: doc/source/faq.rst ================================================ .. _faq: FAQ === .. contents:: :depth: 1 :local: --------------------------------------------------------------------------------------- **Q:** I receive the error: ``"< file file> could not be found."`` ----------------------------------------------------------------------------------------- **A:** Some workloads, e.g. AdobeReader and GooglePhotos, require external asset files.
We host some additional workload dependencies in the `WA Assets Repo `_. To allow WA to try and automatically download required assets from the repository please add the following to your configuration: .. code-block:: YAML remote_assets_url: https://raw.githubusercontent.com/ARM-software/workload-automation-assets/master/dependencies ------------ **Q:** I receive the error: ``"No matching package found for workload "`` ------------------------------------------------------------------------------------ **A:** WA cannot locate the application required for the workload. Please either install the application onto the device or source the apk and place it into ``$WA_USER_DIRECTORY/dependencies/`` ------------ **Q:** I am trying to set a valid runtime parameter, however I still receive the error ``"Unknown runtime parameter"`` ------------------------------------------------------------------------------------------------------------------------- **A:** Please ensure you have the corresponding module loaded on the device. See :ref:`Runtime Parameters ` for the list of runtime parameters and their containing modules, and the appropriate section in :ref:`setting up a device ` for ensuring it is installed. ------------- **Q:** I have a big.LITTLE device but am unable to set parameters corresponding to the big or little core and receive the error ``"Unknown runtime parameter"`` ----------------------------------------------------------------------------------------------------------------------------------------------------------------- **A:** Please ensure you have the hot plugging module enabled for your device (please see the question above). **A:** This can occur if the device uses dynamic hot-plugging, and although WA will try to online all cores to perform discovery, sometimes this can fail, causing WA to incorrectly assume that only one cluster is present. To work around this, please set the ``core_names`` :ref:`parameter ` in the configuration for your device. **Q:** I receive the error ``Could not find plugin or alias "standard"`` ------------------------------------------------------------------------ **A:** Upon first use of WA3, your WA2 config file typically located at ``$USER_HOME/config.py`` will have been converted to a WA3 config file located at ``$USER_HOME/config.yaml``. The "standard" output processor, present in WA2, has been merged into the core framework and therefore no longer exists. To fix this error please remove the "standard" entry from the "augmentations" list in the WA3 config file. **Q:** My Juno board keeps resetting upon starting WA even if it hasn't crashed. -------------------------------------------------------------------------------- **A:** Please ensure that you do not have any other terminals (e.g. ``screen`` sessions) connected to the board's UART. When WA attempts to open the connection for its own use, this can cause the board to reset if a connection is already present. **Q:** I'm using the FPS instrument but I do not get any/correct results for my workload ----------------------------------------------------------------------------------------- **A:** If your device is running Android 6.0+ then the default utility for collecting fps metrics will be ``gfxinfo``, however this does not seem to be able to extract any meaningful information for some workloads. In this case please try setting the ``force_surfaceflinger`` parameter for the ``fps`` augmentation to ``True``.
This will attempt to guess the "View" for the workload automatically; however this is device specific and therefore may need customizing. If this is required, please open the application and execute ``dumpsys SurfaceFlinger --list`` on the device via adb. This will provide a list of all views available for measuring. As an example, when trying to find the view for the AngryBirds Rio workload you may get something like: .. code-block:: none ... AppWindowToken{41dfe54 token=Token{77819a7 ActivityRecord{a151266 u0 com.rovio.angrybirdsrio/com.rovio.fusion.App t506}}}#0 a3d001c com.rovio.angrybirdsrio/com.rovio.fusion.App#0 Background for -SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0 SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0 com.rovio.angrybirdsrio/com.rovio.fusion.App#0 boostedAnimationLayer#0 mAboveAppWindowsContainers#0 ... From these, ``"SurfaceView - com.rovio.angrybirdsrio/com.rovio.fusion.App#0"`` is most likely the View that needs to be set as the ``view`` workload parameter and will be picked up by the ``fps`` augmentation. **Q:** I am getting an error which looks similar to ``'CONFIG_SND_BT87X is not exposed in kernel config'...`` ------------------------------------------------------------------------------------------------------------- **A:** If you are receiving this under normal operation, this can be caused by a mismatch of your WA and devlib versions. Please update both to their latest versions and delete your ``$USER_HOME/.workload_automation/cache/targets.json`` (or equivalent) file. **Q:** I get an error which looks similar to ``UnicodeDecodeError('ascii' codec can't decode byte...`` ------------------------------------------------------------------------------------------------------ **A:** If you receive this error or a similar warning about your environment, please ensure that you configure your environment to use a locale which supports UTF-8. Otherwise this can cause issues when attempting to parse files containing non-ASCII characters. **Q:** I get the error ``Module "X" failed to install on target`` ------------------------------------------------------------------------------------------------------ **A:** By default a set of devlib modules, designed to add additional functionality, will be automatically loaded onto the target. If the functionality provided by a module is not required then the module can be safely disabled by setting ``load_default_modules`` to ``False`` in the ``device_config`` entry of the :ref:`agenda ` and then re-enabling any specific modules that are still required. An example agenda snippet is shown below: .. code-block:: none config: device: generic_android device_config: load_default_modules: False modules: ['list', 'of', 'modules', 'to', 'enable'] ================================================ FILE: doc/source/glossary.rst ================================================ .. _glossary: Glossary ======== .. glossary:: Agenda An agenda specifies what is to be done during a Workload Automation run. This includes which workloads will be run, with what configuration and which augmentations will be enabled, etc. (For more information please see the :ref:`Agenda Reference `.) Alias An alias associated with a workload or a parameter. In case of parameters, this is simply an alternative name for a parameter; usually these are employed to provide backward compatibility for renamed parameters, or in cases where there are several commonly used terms, each equally valid, for something.
In case of Workloads, aliases can also be merely alternatives to the workload name, however they can also alter the default values for the parameters the Workload is instantiated with. A common scenario is when a single workload can be run under several distinct configurations (e.g. has several alternative tests that might be run) that are configurable via a parameter. An alias may be added for each such configuration. In order to see the available aliases for a workload, one can use the :ref:`show command `\ . .. seealso:: :term:`Global Alias` Artifact An artifact is something that has been generated as part of the run, for example a file containing output, or metadata in the form of log files. WA supports multiple "kinds" of artifacts and will handle them accordingly; for more information please see the :ref:`Developer Reference `. Augmentation Augmentations are plugins that augment the execution of workload jobs with additional functionality; usually, that takes the form of generating additional metrics and/or artifacts, such as traces or logs. For more information please see :ref:`augmentations `. Classifier An arbitrary key-value pair that may be associated with a :term:`job`\ , a :term:`metric`\ , or an :term:`artifact`. The key must be a string. The value can be any simple scalar type (string, integer, boolean, etc). These have no pre-defined meaning but may be used to aid filtering/grouping of metrics and artifacts during output processing. .. seealso:: :ref:`classifiers`. Global Alias Typically, values for plugin parameters are specified namespaced under the plugin's name in the configuration. A global alias is an alias that may be specified at the top level in configuration. There are two common reasons for this. First, several plugins might specify the same global alias for the same parameter, thus allowing all of them to be configured with one setting. Second, a plugin may not be exposed directly to the user (e.g. resource getters) so it makes more sense to treat its parameters as global configuration values. .. seealso:: :term:`Alias` Instrument A WA "Instrument" can be quite diverse in its functionality, but the majority of those available are there to collect some kind of additional data (such as trace, energy readings etc.) from the device during workload execution. To see available instruments please use the :ref:`list command ` or see the :ref:`Plugin Reference `. Job A single execution of a workload. A job is defined by an associated :term:`spec`. However, multiple jobs can share the same spec; e.g. even if you only have 1 workload to run but want 5 iterations, then 5 individual jobs will be generated to be run. Metric A single numeric measurement or score collected during job execution. Output Processor An "Output Processor" is what is used to process the output generated by a workload. They can simply store the results in a presentable format or use the information collected to generate additional metrics. To see available output processors please use the :ref:`list command ` or see the :ref:`Plugin Reference `. Run A single execution of the ``wa run`` command. A run consists of one or more :term:`job`\ s, and results in a single output directory structure containing job results and metadata. Section A set of configurations for how jobs should be run. The settings in a section take lower precedence than workload-specific settings. For every section, all jobs will be run again, with the changes specified in the section's agenda entry.
Sections are useful for running the same set of jobs several times while varying global settings. Spec A specification of a workload. For example you can have a single workload specification that is then executed multiple times if you desire multiple iterations, but the configuration of the workload will remain the same. In WA2 the term "iteration" was used to refer to the same underlying idea that spec now does; it should be noted, however, that this is no longer the case, and in WA3 an iteration is merely a configuration point. Spec is to blueprint as job is to product. WA Workload Automation. The full name of this framework. Workload A workload is the lowest level specification for tasks that need to be run on a target. A workload can have multiple iterations, and may be run additional times depending on the number of sections. ================================================ FILE: doc/source/index.rst ================================================ .. Workload Automation 3 documentation master file, ================================================ Welcome to Documentation for Workload Automation ================================================ Workload Automation (WA) is a framework for executing workloads and collecting measurements on Android and Linux devices. WA includes automation for nearly 40 workloads and supports some common instrumentation (ftrace, hwmon) along with a number of output formats. WA is designed primarily as a developer tool/framework to facilitate data driven development by providing a method of collecting measurements from a device in a repeatable way. WA is highly extensible. Most of the concrete functionality is implemented via :ref:`plug-ins `, and it is easy to :ref:`write new plug-ins ` to support new device types, workloads, instruments or output processing. .. note:: To see the documentation of individual plugins please see the :ref:`Plugin Reference `. .. contents:: Contents What's New ========== .. toctree:: :maxdepth: 1 changes migration_guide User Information ================ This section lists general usage documentation. If you're new to WA3, it is recommended you start with the :ref:`User Guide ` page. This section also contains installation and configuration guides. .. toctree:: :maxdepth: 3 user_information .. _in-depth: Developer Information ===================== This section contains more advanced topics, such as how to write your own Plugins and detailed descriptions of how WA functions under the hood. .. toctree:: :maxdepth: 3 developer_information Plugin Reference ================ .. toctree:: :maxdepth: 2 plugins API === .. toctree:: :maxdepth: 2 api Glossary ======== .. toctree:: :maxdepth: 2 glossary FAQ ==== .. toctree:: :maxdepth: 2 faq ================================================ FILE: doc/source/instrument_method_map.template ================================================ .. _instruments_method_map: Instrumentation Signal-Method Mapping ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Instrument methods get automatically hooked up to signals based on their names. Mostly, the method name corresponds to the name of the signal, however there are a few convenience aliases defined (listed first) to make it easier to relate instrumentation code to the workload execution model. For an overview on when these signals are dispatched during execution please see the :ref:`Developer Reference `.
$signal_names The methods above may be decorated with one of the listed decorators to set the priority (a value in the ``wa.framework.signal.CallbackPriority`` enum) of the Instrument method relative to other callbacks registered for the signal (within the same priority level, callbacks are invoked in the order they were registered). The table below shows the mapping of the decorator to the corresponding priority name and level: $priority_prefixes Unresponsive Targets ~~~~~~~~~~~~~~~~~~~~ If a target is believed to be unresponsive, instrument callbacks will be disabled to prevent a cascade of errors and potential corruptions of state, as it is generally assumed that instrument callbacks will want to do something with the target. If your callback only does something with the host, and does not require an active target connection, you can decorate it with the ``@hostside`` decorator to ensure it gets invoked even if the target becomes unresponsive. ================================================ FILE: doc/source/migration_guide.rst ================================================ .. _migration-guide: Migration Guide ================ .. contents:: Contents :depth: 4 :local: Users """"" Configuration -------------- Default configuration file change ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Instead of the standard ``config.py`` file located at ``$WA_USER_DIRECTORY/config.py`` WA now uses a ``config.yaml`` file (at the same location) which is written in the YAML format instead of python. Additionally, upon first invocation WA3 will automatically try and detect whether a WA2 config file is present and convert it to use the new WA3 format. During this process any known parameter name changes should be detected and updated accordingly. Plugin Changes ^^^^^^^^^^^^^^^ Please note that not all plugins that were available for WA2 are currently available for WA3, so you may need to remove plugins that are no longer present from your config files. One plugin of note is the ``standard`` results processor; this has been removed and its functionality built into the core framework. -------------------------------------------------------- Agendas ------- WA3 is designed to keep configuration as backwards compatible as possible, so most agendas should work out of the box; however the main changes in the style of WA3 agendas are: Global Section ^^^^^^^^^^^^^^ The ``global`` and ``config`` sections have been merged, so all configuration that was specified under the "global" keyword can now also be specified under "config". Although "global" is still a valid keyword, you will need to ensure that there are no duplicated entries in each section. Instrumentation and Results Processors merged ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``instrumentation`` and ``results_processors`` sections from WA2 have now been merged into a single ``augmentations`` section to simplify the configuration process, although for backwards compatibility, support for the old sections has been retained. Per workload enabling of augmentations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ All augmentations can now be enabled and disabled on a per workload basis. Setting Runtime Parameters ^^^^^^^^^^^^^^^^^^^^^^^^^^ :ref:`Runtime Parameters ` are now the preferred way of configuring cpufreq, hotplug and cpuidle, rather than setting the corresponding sysfile values, as this will perform additional validation and ensure the nodes are set in the correct order to avoid any conflicts.
Parameter Changes ^^^^^^^^^^^^^^^^^ Any parameter name changes listed below will also have their old names specified as aliases and should continue to work as normal; however going forward the new parameter names should be preferred: - The workload parameter :confval:`clean_up` has been renamed to :confval:`cleanup_assets` to better reflect its purpose. - The workload parameter :confval:`check_apk` has been renamed to :confval:`prefer_host_package` to be more explicit in its functionality, indicating whether a package on the target or the host should have priority when searching for a suitable package. - The execution order ``by_spec`` is now called ``by_workload`` for clarity of purpose. For more information please see :ref:`configuration-specification`. - The ``by_spec`` reboot policy has been removed as this is no longer relevant, and the ``each_iteration`` reboot policy has been renamed to ``each_job``; please see :ref:`configuration-specification` for more information. An attempt has been made to standardize individual workload parameters for the more common operations, e.g.: - :confval:`iterations` is now :confval:`loops` to indicate how many 'tight loops' of the workload should be performed, e.g. without the setup/teardown method calls. - :confval:`num_threads` is now consistently :confval:`threads` across workloads. - :confval:`run_timeout` is now consistently :confval:`timeout` across workloads. - :confval:`taskset_mask` and :confval:`cpus` have been changed to consistently be referred to as :confval:`cpus`, and their type is now a :class:`cpu_mask`, allowing configuration to be supplied either directly as a mask, as a list of cpu indexes, or as a sysfs-style string. Output ^^^^^^^ Output Directory ~~~~~~~~~~~~~~~~ The :ref:`output directory `'s structure has changed layout and now includes additional subdirectories. There is now a ``__meta`` directory that contains copies of the agenda and config files supplied to WA for that particular run, so that all the relevant config is self contained. Additionally, if one or more jobs fail during a run then the corresponding output directory will be moved into a ``__failed`` subdirectory to allow for quicker analysis. Output API ~~~~~~~~~~ There is now an Output API which can be used to more easily post process the output from a run. For more information please see the :ref:`Output API ` documentation. ----------------------------------------------------------- Developers """""""""""" Framework --------- Imports ^^^^^^^ To distinguish between the different versions of WA, WA3's package name has been renamed to ``wa``. This means that all the old ``wlauto`` imports will need to be updated. For more information please see the corresponding section in the :ref:`developer reference section` Asset Deployment ^^^^^^^^^^^^^^^^^^ WA3 now contains a generic assets deployment and clean up mechanism, so if a workload was previously doing this in an ad-hoc manner this should be updated to utilize the new functionality. To make use of this functionality a list of assets should be set as the workload ``deployable_assets`` attribute; these will be automatically retrieved via WA's resource getters and deployed either to the target's working directory or to a custom directory specified as the workload's ``assets_directory`` attribute. If a custom implementation is required, the ``deploy_assets`` method should be overridden inside the workload, as sketched below.
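As an illustrative sketch (the workload name, package name and asset file names here are hypothetical, and we assume ``deploy_assets`` takes the job context like the other workload methods), a workload using this mechanism might look like the following:

.. code-block:: python

    from wa import ApkWorkload

    class MyWorkload(ApkWorkload):

        name = 'my_workload'                        # hypothetical workload name
        package_names = ['com.example.myworkload']  # hypothetical package
        # These files will be retrieved via WA's resource getters and deployed
        # to the target's working directory, or to ``assets_directory`` if set.
        deployable_assets = ['test_video.mp4', 'config.json']

        def deploy_assets(self, context):
            # Only needed if custom deployment logic is required; otherwise the
            # generic implementation can be used as-is. Any files pushed here
            # should be recorded in ``self.deployed_assets`` (see below) so that
            # the generic ``remove_assets`` method can clean them up.
            super(MyWorkload, self).deploy_assets(context)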
To allow for the removal of the additional assets, any additional file paths should be added to the ``self.deployed_assets`` list, which is used to keep track of any assets that have been deployed for the workload. This is what is used by the generic ``remove_assets`` method to clean up any files deployed to the target. Optionally, if the file structure of the deployed assets requires additional logic, then the ``remove_assets`` method can be overridden for a particular workload as well. -------------------------------------------------------- Workloads --------- Python Workload Structure ^^^^^^^^^^^^^^^^^^^^^^^^^^ - The ``update_results`` method has been split out into 2 stages. There is now ``extract_results`` and ``update_output``, which should be used for extracting any results from the target back to the host system, and for updating the output with any metrics or artefacts for the specific workload iteration, respectively. - WA now features :ref:`execution decorators ` which can be used to allow for more efficient binary deployment, ensuring that binaries are only installed to the device once per run. For more information on implementing this please see :ref:`deploying executables to a target `. APK Functionality ^^^^^^^^^^^^^^^^^ All apk functionality has been re-factored into an APKHandler object which is available as the apk attribute of the workload. This means that for example ``self.launch_application()`` would now become ``self.apk.start_activity()`` UiAutomator Java Structure ^^^^^^^^^^^^^^^^^^^^^^^^^^ Instead of a single ``runUiAutomation`` method to perform all of the UiAutomation, the structure has been refactored into 5 methods that can optionally be overridden. The available methods are ``initialize``, ``setup``, ``runWorkload``, ``extractResults`` and ``teardown`` to better mimic the different stages in the python workload. - ``initialize`` should be used to retrieve and set any relevant parameters required during the workload. - ``setup`` should be used to perform any setup required for the workload, for example dismissing popups or configuring any required settings. - ``runWorkload`` should be used to perform the actual measurable work of the workload. - ``extractResults`` should be used to extract any relevant results from the target after the workload has been completed. - ``teardown`` should be used to perform any final clean up of the workload on the target. .. note:: The ``initialize`` method should have the ``@Before`` tag attached to the method, which will cause it to be run before each of the stages of the workload. The remaining methods should all have the ``@Test`` tag attached to the method to indicate that this is a test stage that should be called at the appropriate time. GUI Functionality ^^^^^^^^^^^^^^^^^ For UI based applications all UI functionality has been re-factored into a ``gui`` attribute which currently will be either a ``UiAutomatorGUI`` object or a ``ReventGUI`` depending on the workload type. This means that for example if you wish to pass parameters to a UiAutomator workload you will now need to use ``self.gui.uiauto_params['Parameter Name'] = value`` Attributes ^^^^^^^^^^ - The old ``package`` attribute has been replaced by ``package_names`` which expects a list of strings, allowing multiple package names to be specified if required. It is also no longer required to explicitly state the launchable activity; this will be automatically discovered from the apk, so this workload attribute can be removed.
- The ``device`` attribute of the workload is now a devlib ``target``. Some of the command names remain the same, however there will be differences. The API can be found at http://devlib.readthedocs.io/en/latest/target.html, however some of the more common changes can be found below: +----------------------------------------------+---------------------------------+ | Original Method | New Method | +----------------------------------------------+---------------------------------+ |``self.device.pull_file(file)`` | ``self.target.pull(file)`` | +----------------------------------------------+---------------------------------+ |``self.device.push_file(file)`` | ``self.target.push(file)`` | +----------------------------------------------+---------------------------------+ |``self.device.install_executable(file)`` | ``self.target.install(file)`` | +----------------------------------------------+---------------------------------+ |``self.device.execute(cmd, background=True)`` | ``self.target.background(cmd)`` | +----------------------------------------------+---------------------------------+ ================================================ FILE: doc/source/plugins.rst ================================================ .. _plugin-reference: ================= Plugin Reference ================= This section lists Plugins that currently come with WA3. Each package below represents a particular type of extension (e.g. a workload); each sub-package of that package is a particular instance of that extension (e.g. the Andebench workload). Clicking on a link will show what the individual extension does, what configuration parameters it takes, etc. For how to implement your own Plugins, please refer to the guides in the :ref:`writing plugins ` section.
.. toctree:: :maxdepth: 2 plugins/workloads .. toctree:: :maxdepth: 2 plugins/instruments .. toctree:: :maxdepth: 2 plugins/energy_instrument_backends .. toctree:: :maxdepth: 2 plugins/output_processors .. toctree:: :maxdepth: 2 plugins/targets
================================================ FILE: doc/source/user_information/how_to.rst ================================================ ******* How Tos ******* .. contents:: Contents :depth: 4 :local: .. include:: user_information/how_tos/agenda.rst .. include:: user_information/how_tos/device_setup.rst .. include:: user_information/how_tos/revent.rst ================================================ FILE: doc/source/user_information/how_tos/agenda.rst ================================================ .. _agenda: Defining Experiments With an Agenda =================================== An agenda specifies what is to be done during a Workload Automation run, including which workloads will be run, with what configuration, which augmentations will be enabled, etc. Agenda syntax is designed to be both succinct and expressive. Agendas are specified using YAML_ notation. It is recommended that you familiarize yourself with the linked page. .. _YAML: http://en.wikipedia.org/wiki/YAML Specifying which workloads to run --------------------------------- The central purpose of an agenda is to specify what workloads to run. A minimalist agenda contains a single entry at the top level called "workloads" that maps onto a list of workload names to run: .. code-block:: yaml workloads: - dhrystone - memcpy - rt_app This specifies a WA run consisting of ``dhrystone`` followed by ``memcpy``, followed by ``rt_app`` workloads, and using the augmentations specified in config.yaml (see the :ref:`configuration-specification` section). .. note:: If you're familiar with YAML, you will recognize the above as a single-key associative array mapping onto a list. YAML has two notations for both associative arrays and lists: block notation (seen above) and also in-line notation. This means that the above agenda can also be written in a single line as :: workloads: [dhrystone, memcpy, rt-app] (with the list in-lined), or :: {workloads: [dhrystone, memcpy, rt-app]} (with both the list and the associative array in-line). WA doesn't care which of the notations is used as they all get parsed into the same structure by the YAML parser. You can use whatever format you find easier/clearer. .. note:: WA plugin names are case-insensitive, and dashes (``-``) and underscores (``_``) are treated identically. So all of the following entries specify the same workload: ``rt_app``, ``rt-app``, ``RT-app``. Multiple iterations ------------------- There will normally be some variability in workload execution when running on a real device. In order to quantify it, multiple iterations of the same workload are usually performed. You can specify the number of iterations for each workload by adding an ``iterations`` field to the workload specifications (or "specs"): .. code-block:: yaml workloads: - name: dhrystone iterations: 5 - name: memcpy iterations: 5 - name: cyclictest iterations: 5 Now that we're specifying both the workload name and the number of iterations in each spec, we have to explicitly name each field of the spec. It is often the case that, as in the example above, you will want to run all workloads for the same number of iterations. Rather than having to specify it for each and every spec, you can do so with a single entry by adding ``iterations`` to your ``config`` section in your agenda: .. code-block:: yaml config: iterations: 5 workloads: - dhrystone - memcpy - cyclictest If the same field is defined both in the config section and in a spec, then the value in the spec will overwrite the config value.
For example, suppose we wanted to run all our workloads for five iterations, except cyclictest which we want to run for ten (e.g. because we know it to be particularly unstable). This can be specified like this: .. code-block:: yaml config: iterations: 5 workloads: - dhrystone - memcpy - name: cyclictest iterations: 10 Again, because we are now specifying two fields for the cyclictest spec, we have to explicitly name them. Configuring Workloads --------------------- Some workloads accept configuration parameters that modify their behaviour. These parameters are specific to a particular workload and can alter the workload in any number of ways, e.g. set the duration for which to run, or specify a media file to be used, etc. The vast majority of workload parameters will have some default value, so it is only necessary to specify the name of the workload in order for WA to run it. However, sometimes you want more control over how a workload runs. For example, by default, dhrystone will execute 10 million loops across four threads. Suppose your device has six cores available and you want the workload to load them all. You also want to increase the total number of loops accordingly to 15 million. You can specify this using dhrystone's parameters: .. code-block:: yaml config: iterations: 5 workloads: - name: dhrystone params: threads: 6 mloops: 15 - memcpy - name: cyclictest iterations: 10 .. note:: You can find out what parameters a workload accepts by looking it up in the :ref:`Workloads` section or using WA itself with the "show" command:: wa show dhrystone See the :ref:`Invocation` section for details. In addition to configuring the workload itself, we can also specify configuration for the underlying device, which can be done by setting runtime parameters in the workload spec. Explicit runtime parameters have been exposed for configuring cpufreq, hotplug and cpuidle. For more detailed information on Runtime Parameters see the :ref:`runtime parameters ` section. For example, suppose we want to ensure the maximum score for our benchmarks, at the expense of power consumption, so we want to set the cpufreq governor to "performance" and enable all of the cpus on the device (assuming there are 8 cpus available), which can be done like this: .. code-block:: yaml config: iterations: 5 workloads: - name: dhrystone runtime_params: governor: performance num_cores: 8 workload_params: threads: 6 mloops: 15 - memcpy - name: cyclictest iterations: 10 I've renamed ``params`` to ``workload_params`` for clarity, but that wasn't strictly necessary as ``params`` is interpreted as ``workload_params`` inside a workload spec. Runtime parameters do not automatically reset at the end of workload spec execution, so all subsequent iterations will also be affected unless they explicitly change the parameter (in the example above, the performance governor will also be used for ``memcpy`` and ``cyclictest``). There are two ways around this: either set the ``reboot_policy`` WA setting (see the :ref:`configuration-specification` section) such that the device gets rebooted between job executions, thus being returned to its initial state, or set the default runtime parameter values in the ``config`` section of the agenda so that they get set for every spec that doesn't explicitly override them.
If additional configuration of the device is required which is not exposed via the built-in runtime parameters, you can write a value to any file exposed on the device using ``sysfile_values``. For example, we could have also performed the same configuration manually (assuming we have a big.LITTLE system, and our cores 0-3 and 4-7 are in 2 separate DVFS domains, so setting the governor for cpu0 and cpu4 will affect all our cores), e.g. .. code-block:: yaml config: iterations: 5 workloads: - name: dhrystone runtime_params: sysfile_values: /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor: performance /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor: performance /sys/devices/system/cpu/cpu0/online: 1 /sys/devices/system/cpu/cpu1/online: 1 /sys/devices/system/cpu/cpu2/online: 1 /sys/devices/system/cpu/cpu3/online: 1 /sys/devices/system/cpu/cpu4/online: 1 /sys/devices/system/cpu/cpu5/online: 1 /sys/devices/system/cpu/cpu6/online: 1 /sys/devices/system/cpu/cpu7/online: 1 workload_params: threads: 6 mloops: 15 - memcpy - name: cyclictest iterations: 10 Here, we're specifying a ``sysfile_values`` runtime parameter for the device. For more information please see :ref:`setting sysfiles `. APK Workloads ^^^^^^^^^^^^^ WA has various resource getters that can be configured to locate APK files, but for most people APK files should be kept in the ``$WA_USER_DIRECTORY/dependencies/SOME_WORKLOAD/`` directory (by default ``~/.workload_automation/dependencies/SOME_WORKLOAD/``). The ``WA_USER_DIRECTORY`` environment variable can be used to change the location of this directory. The APK files need to be put into the corresponding directories for the workload they belong to. The name of the file can be anything, but as explained below it may need to contain certain pieces of information. All ApkWorkloads have parameters that affect the way in which APK files are resolved: ``exact_abi``, ``force_install`` and ``prefer_host_package``. Their exact behaviours are outlined below. :exact_abi: If this setting is enabled WA's resource resolvers will look for the device's ABI with any native code present in the apk. By default this setting is disabled since most apks will work across all devices. You may wish to enable this feature when working with devices that support multiple ABIs (like 64-bit devices that can run 32-bit APK files) and are specifically trying to test one or the other. :force_install: If this setting is enabled WA will *always* use the APK file on the host, and re-install it on every iteration. If there is no APK on the host that is a suitable version and/or ABI for the workload, WA will error when ``force_install`` is enabled. :prefer_host_package: This parameter is used to specify a preference over host or target versions of the app. When set to ``True`` WA will prefer the host side version of the APK. It will check if the host has the APK and whether it meets the version requirements of the workload. If so, and the target also already has the same version, nothing will be done; otherwise WA will overwrite the target's installed application with the host version. If the host is missing the APK or it does not meet version requirements, WA will fall back to the app on the target if it is present and is a suitable version. When this parameter is set to ``False`` WA will prefer to use the version already on the target if it meets the workload's version requirements. If it does not, it will fall back to searching the host for the correct version.
In both modes, if neither the host nor the target has a suitable version, WA will produce an error and will not run the workload. :version: This parameter is used to specify which version of the workload's uiautomation is used. In some workloads, e.g. ``geekbench``, multiple versions with drastically different UIs are supported. An APK's version will be automatically extracted, therefore it is possible to have multiple apks for different versions of a workload present on the host and to select which is used for a particular job by specifying the relevant version in your :ref:`agenda `. :variant_name: Some workloads use variants of APK files; this is usually the case with web browser APK files. These work in exactly the same way as the version. IDs and Labels -------------- It is possible to list multiple specs with the same workload in an agenda. You may wish to do this if you want to run a workload with different parameter values or under different runtime configurations of the device. The workload name therefore does not uniquely identify a spec. To be able to distinguish between different specs (e.g. in reported results), each spec has an ID which is unique to all specs within an agenda (and therefore within a single WA run). If an ID isn't explicitly specified using the ``id`` field (note that the field name is in lower case), one will be automatically assigned to the spec at the beginning of the WA run based on the position of the spec within the list. The first spec *without an explicit ID* will be assigned ID ``wk1``, the second spec *without an explicit ID* will be assigned ID ``wk2``, and so forth. Numerical IDs aren't particularly easy to deal with, which is why it is recommended that, for non-trivial agendas, you manually set the ids to something more meaningful (or use labels -- see below). An ID can be pretty much anything that will pass through the YAML parser. The only requirement is that it is unique to the agenda. However, it is usually better to keep them reasonably short (they don't need to be *globally* unique), and to stick with alpha-numeric characters and underscores/dashes. While WA can handle other characters as well, getting too adventurous with your IDs may cause issues further down the line when processing WA output (e.g. when uploading them to a database that may have its own restrictions). In addition to IDs, you can also specify labels for your workload specs. These are similar to IDs but do not have the uniqueness restriction. If specified, labels will be used by some output processors instead of (or in addition to) the workload name. For example, the ``csv`` output processor will put the label in the "workload" column of the CSV file. It is up to you how you choose to use IDs and labels. WA itself doesn't expect any particular format (apart from uniqueness for IDs). Below is the earlier example updated to specify explicit IDs and to label the dhrystone spec to reflect the parameters used. .. code-block:: yaml config: iterations: 5 workloads: - id: 01_dhry name: dhrystone label: dhrystone_15over6 runtime_params: cpu0_governor: performance workload_params: threads: 6 mloops: 15 - id: 02_memc name: memcpy - id: 03_cycl name: cyclictest iterations: 10 ..
_using-classifiers: Classifiers ------------ Classifiers can be used in 2 distinct ways. The first is to supply them in an agenda as a set of key-value pairs to help identify sub-tests of a run; for example, if you have multiple sections in your agenda running your workloads at different frequencies, you might want to set a classifier specifying which frequencies are being used. These can then be utilized later, for example with the ``csv`` :ref:`output processor ` with ``use_all_classifiers`` set to ``True``; this will add additional columns to the output file for each of the classifier keys that have been specified, allowing for quick comparison. An example agenda is shown here: .. code-block:: yaml config: augmentations: - csv iterations: 1 device: generic_android csv: use_all_classifiers: True sections: - id: max_speed runtime_parameters: frequency: 1700000 classifiers: freq: 1700000 - id: min_speed runtime_parameters: frequency: 200000 classifiers: freq: 200000 workloads: - name: recentfling The other way that they can be used is by being automatically added by some workloads to identify their results metrics and artifacts. For example, some workloads perform multiple tests within the same execution run and therefore will use classifiers to differentiate between them; e.g. the ``recentfling`` workload will use classifiers to distinguish which loop a particular result is for, or whether it is an average across all loops run. The output from the agenda above will produce a csv file similar to what is shown below. Some columns have been omitted for clarity; however, as can be seen, the custom **freq** classifier column has been added and populated, along with the **loop** classifier added by the workload. :: id | workload | metric | freq | loop | value ‖ max_speed-wk1 | recentfling | 90th Percentile | 1700000 | 1 | 8 ‖ max_speed-wk1 | recentfling | 95th Percentile | 1700000 | 1 | 9 ‖ max_speed-wk1 | recentfling | 99th Percentile | 1700000 | 1 | 16 ‖ max_speed-wk1 | recentfling | Jank | 1700000 | 1 | 11 ‖ max_speed-wk1 | recentfling | Jank% | 1700000 | 1 | 1 ‖ # ... max_speed-wk1 | recentfling | Jank | 1700000 | 3 | 1 ‖ max_speed-wk1 | recentfling | Jank% | 1700000 | 3 | 0 ‖ max_speed-wk1 | recentfling | Average 90th Percentile | 1700000 | Average | 7 ‖ max_speed-wk1 | recentfling | Average 95th Percentile | 1700000 | Average | 8 ‖ max_speed-wk1 | recentfling | Average 99th Percentile | 1700000 | Average | 14 ‖ max_speed-wk1 | recentfling | Average Jank | 1700000 | Average | 6 ‖ max_speed-wk1 | recentfling | Average Jank% | 1700000 | Average | 0 ‖ min_speed-wk1 | recentfling | 90th Percentile | 200000 | 1 | 7 ‖ min_speed-wk1 | recentfling | 95th Percentile | 200000 | 1 | 8 ‖ min_speed-wk1 | recentfling | 99th Percentile | 200000 | 1 | 14 ‖ min_speed-wk1 | recentfling | Jank | 200000 | 1 | 5 ‖ min_speed-wk1 | recentfling | Jank% | 200000 | 1 | 0 ‖ # ... min_speed-wk1 | recentfling | Jank | 200000 | 3 | 5 ‖ min_speed-wk1 | recentfling | Jank% | 200000 | 3 | 0 ‖ min_speed-wk1 | recentfling | Average 90th Percentile | 200000 | Average | 7 ‖ min_speed-wk1 | recentfling | Average 95th Percentile | 200000 | Average | 8 ‖ min_speed-wk1 | recentfling | Average 99th Percentile | 200000 | Average | 13 ‖ min_speed-wk1 | recentfling | Average Jank | 200000 | Average | 4 ‖ min_speed-wk1 | recentfling | Average Jank% | 200000 | Average | 0 ‖ ..
.. _sections:

Sections
--------

It is a common requirement to be able to run the same set of workloads under
different device configurations. E.g. you may want to investigate the impact
of changing a particular setting to different values on the benchmark scores,
or to quantify the impact of enabling a particular feature in the kernel. WA
allows this by defining "sections" of configuration within an agenda.

For example, suppose that we want to measure the impact of using 3 different
cpufreq governors on 2 benchmarks. We could create 6 separate workload specs
and set the governor runtime parameter for each entry. However, this
introduces a lot of duplication; and what if we want to change the spec
configuration? We would have to change it in multiple places, running the risk
of forgetting one.

A better way is to keep the two workload specs and define a section for each
governor:

.. code-block:: yaml

    config:
        iterations: 5
        augmentations:
            - ~cpufreq
            - csv
        sysfs_extractor:
            paths: [/proc/meminfo]
        csv:
            use_all_classifiers: True
    sections:
        - id: perf
          runtime_params:
              cpu0_governor: performance
        - id: inter
          runtime_params:
              cpu0_governor: interactive
        - id: sched
          runtime_params:
              cpu0_governor: sched
    workloads:
        - id: 01_dhry
          name: dhrystone
          label: dhrystone_15over6
          workload_params:
              threads: 6
              mloops: 15
        - id: 02_memc
          name: memcpy
          augmentations: [sysfs_extractor]

A section, just like a workload spec, needs to have a unique ID. Apart from
that, a "section" is similar to the ``config`` section we've already seen --
everything that goes into a section will be applied to each workload spec.
Workload specs defined under the top-level ``workloads`` entry will be
executed for each of the sections listed under ``sections``.

.. note:: It is also possible to have a ``workloads`` entry within a section,
          in which case, those workloads will only be executed for that
          specific section.

In order to maintain the uniqueness requirement of workload spec IDs, they
will be namespaced under each section by prepending the section ID to the spec
ID with a dash. So in the agenda above, we no longer have a workload spec with
ID ``01_dhry``; instead there are three specs with IDs ``perf-01_dhry``,
``inter-01_dhry`` and ``sched-01_dhry``.

Note that the ``config`` section still applies to every spec in the agenda. So
the precedence order is -- spec settings override section settings, which in
turn override global settings.

.. _section-groups:

Section Groups
--------------

Section groups are a way of grouping sections together and are used to produce
a cross product of each of the different groups. This can be useful when you
want to run a set of experiments with all the available combinations without
having to specify each combination manually. For example, if we want to
investigate the differences between running at the maximum and minimum
frequency with both the maximum and minimum number of cpus online, we can
create an agenda as follows:

.. code-block:: yaml

    sections:
        - id: min_freq
          runtime_parameters:
              freq: min
          group: frequency
        - id: max_freq
          runtime_parameters:
              freq: max
          group: frequency
        - id: min_cpus
          runtime_parameters:
              cpus: 1
          group: cpus
        - id: max_cpus
          runtime_parameters:
              cpus: 8
          group: cpus
    workloads:
        - dhrystone

This will result in 4 jobs being generated, one for each of the possible
combinations of the ``frequency`` and ``cpus`` groups.
::

    min_freq-min_cpus-wk1 (dhrystone)
    min_freq-max_cpus-wk1 (dhrystone)
    max_freq-min_cpus-wk1 (dhrystone)
    max_freq-max_cpus-wk1 (dhrystone)

Each of the generated jobs will have :ref:`classifiers ` for each group and
the associated id automatically added.

.. code-block:: python

    # ...
    print('Job ID: {}'.format(job.id))
    print('Classifiers:')
    for k, v in job.classifiers.items():
        print('  {}: {}'.format(k, v))

    Job ID: min_freq-min_cpus-wk1
    Classifiers:
        frequency: min_freq
        cpus: min_cpus

.. _augmentations:

Augmentations
-------------

Augmentations are plugins that augment the execution of workload jobs with
additional functionality; usually, this takes the form of generating
additional metrics and/or artifacts, such as traces or logs. There are two
types of augmentations:

Instruments
    These "instrument" a WA run in order to change its behaviour (e.g.
    introducing delays between successive job executions), or to collect
    additional measurements (e.g. energy usage). Some instruments may depend
    on particular features being enabled on the target (e.g. cpufreq), or on
    additional hardware (e.g. energy probes).

Output processors
    These post-process metrics and artifacts generated by workloads or
    instruments, as well as target metadata collected by WA, in order to
    generate additional metrics and/or artifacts (e.g. generating statistics
    or reports). Output processors are also used to export WA output
    externally (e.g. upload to a database).

The main practical difference between instruments and output processors is
that the former rely on an active connection to the target to function,
whereas the latter only operate on previously collected results and metadata.
This means that output processors can run "off-line" using the ``wa process``
command.

Both instruments and output processors are configured in the same way in the
agenda, which is why they are grouped together into "augmentations".
Augmentations are enabled by listing them under the ``augmentations`` entry in
a config file or the ``config`` section of the agenda.

.. code-block:: yaml

    config:
        augmentations: [trace-cmd]

The code above illustrates an agenda entry to enable the ``trace-cmd``
instrument. If you have multiple ``augmentations`` entries (e.g. both in your
config file and in the agenda), then they will be combined, so that the final
set of augmentations for the run will be their union.

.. note:: WA2 did not have augmentations, and instead supported
          "instrumentation" and "result_processors" as distinct configuration
          entries. For compatibility, these entries are still supported in
          WA3, however they should be considered deprecated, and their use is
          discouraged.

Configuring augmentations
^^^^^^^^^^^^^^^^^^^^^^^^^

Most augmentations will take parameters that modify their behavior. Parameters
available for a particular augmentation can be viewed using the ``wa show
<augmentation>`` command. This will also show the default values used. Values
for these parameters can be specified by creating an entry with the
augmentation's name, and specifying parameter values under it.

.. code-block:: yaml

    config:
        augmentations: [trace-cmd]
        trace-cmd:
            events: ['sched*', 'power*', irq]
            buffer_size: 100000

The code above specifies values for the ``events`` and ``buffer_size``
parameters for the ``trace-cmd`` instrument, as well as enabling it.
You may specify configuration for the same augmentation in multiple locations
(e.g. your config file and the config section of the agenda). These entries
will be combined to form the final configuration for the augmentation used
during the run. If different values for the same parameter are present in
multiple entries, the ones "more specific" to a particular run will be used
(e.g. values in the agenda will override those in the config file).

.. note:: Creating an entry for an augmentation alone does not enable it! You
          **must** list it under ``augmentations`` in order for it to be
          enabled for a run. This makes it easier to quickly enable and
          disable augmentations with complex configurations, and also allows
          defining "static" configuration in the top-level config, without
          actually enabling the augmentation for all runs.

Disabling augmentations
^^^^^^^^^^^^^^^^^^^^^^^

Sometimes, you may wish to disable an augmentation for a particular run, while
keeping it enabled in general. You *could* modify your config file to
temporarily disable it. However, you must then remember to re-enable it
afterwards. This could be inconvenient and error prone, especially if you're
running multiple experiments in parallel and only want to disable the
augmentation for one of them.

Instead, you can explicitly disable an augmentation by specifying its name
prefixed with a tilde (``~``) inside ``augmentations``.

.. code-block:: yaml

    config:
        augmentations: [trace-cmd, ~cpufreq]

The code above enables the ``trace-cmd`` instrument and disables the
``cpufreq`` instrument (which is enabled in the default config).

If you want to start configuration for an experiment from a "blank slate" and
want to disable all previously-enabled augmentations, without necessarily
knowing what they are, you can use the special ``~~`` entry.

.. code-block:: yaml

    config:
        augmentations: [~~, trace-cmd, csv]

The code above disables all augmentations enabled up to that point, and
enables ``trace-cmd`` and ``csv`` for this run.

.. note:: The ``~~`` only disables augmentations from previously-processed
          sources. Its ordering in the list does not matter. For example,
          specifying ``augmentations: [trace-cmd, ~~, csv]`` will have exactly
          the same effect as above -- i.e. both trace-cmd *and* csv will be
          enabled.

Workload-specific augmentation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

It is possible to enable or disable (but not configure) augmentations at the
workload or section level, as well as in the global config, in which case the
augmentations would only be enabled/disabled for that workload/section. If the
same augmentation is enabled at one level and disabled at another, as with all
WA configuration, the more specific settings will take precedence over the
less specific ones (i.e. workloads override sections which, in turn, override
global config).

Augmentations Example
^^^^^^^^^^^^^^^^^^^^^

.. code-block:: yaml

    config:
        augmentations: [~~, fps]
        trace-cmd:
            events: ['sched*', 'power*', irq]
            buffer_size: 100000
        file_poller:
            files:
                - /sys/class/thermal/thermal_zone0/temp
    sections:
        - classifiers:
              type: energy
          augmentations: [energy_measurement]
        - classifiers:
              type: trace
          augmentations: [trace-cmd, file_poller]
    workloads:
        - gmail
        - geekbench
        - googleplaybooks
        - name: dhrystone
          augmentations: [~fps]

The example above shows an experiment that runs a number of workloads in order
to evaluate their thermal impact and energy usage. All previously-configured
augmentations are disabled with ``~~``, so that only the configuration
specified in this agenda is enabled.
Since most of the workloads are "productivity" use cases that do not generate
their own metrics, the ``fps`` instrument is enabled to get some meaningful
performance metrics for them; the only exception is ``dhrystone``, which is a
benchmark that reports its own metrics and has no GUI, so the instrument is
disabled for it using ``~fps``.

Each workload will be run in two configurations: once to collect energy
measurements, and once to collect thermal data and kernel trace. Trace can
give insight into why a workload is using more or less energy than expected,
but it can be relatively intrusive and might impact absolute energy and
performance metrics, which is why it is collected separately. Classifiers_ are
used to separate metrics from the two configurations in the results.

.. _other-agenda-configuration:

Other Configuration
-------------------

.. _configuration_in_agenda:

As mentioned previously, the ``config`` section in an agenda can contain
anything that can be defined in ``config.yaml``. Certain configuration (e.g.
``run_name``) makes more sense to define in an agenda than a config file.
Refer to the :ref:`configuration-specification` section for details.

.. code-block:: yaml

    config:
        project: governor_comparison
        run_name: performance_vs_interactive

        device: generic_android
        reboot_policy: never

        iterations: 5
        augmentations:
            - ~cpufreq
            - csv
        sysfs_extractor:
            paths: [/proc/meminfo]
        csv:
            use_all_classifiers: True
    sections:
        - id: perf
          runtime_params:
              cpu0_governor: performance
        - id: inter
          runtime_params:
              cpu0_governor: interactive
    workloads:
        - id: 01_dhry
          name: dhrystone
          label: dhrystone_15over6
          workload_params:
              threads: 6
              mloops: 15
        - id: 02_memc
          name: memcpy
          augmentations: [sysfs_extractor]
        - id: 03_cycl
          name: cyclictest
          iterations: 10

================================================
FILE: doc/source/user_information/how_tos/device_setup.rst
================================================

.. _setting-up-a-device:

Setting Up A Device
===================

WA should work with most Android devices out-of-the box, as long as the device
is discoverable by ``adb`` (i.e. gets listed when you run ``adb devices``).
For USB-attached devices, that should be the case; for network devices, ``adb
connect`` would need to be invoked with the IP address of the device. If there
is only one device connected to the host running WA, then no further
configuration should be necessary (though you may want to :ref:`tweak some
Android settings `\ ).

If you have multiple devices connected, have a non-standard Android build
(e.g. on a development board), or want to use some of the more advanced WA
functionality, further configuration will be required.

Android
-------

.. _android-general-device-setup:

General Device Setup
^^^^^^^^^^^^^^^^^^^^

You can specify the device interface by setting the ``device`` setting in a
``config`` file or section. Available interfaces can be viewed by running the
``wa list targets`` command. If you don't see your specific platform listed
(which is likely unless you're using one of the Arm-supplied platforms), then
you should use the ``generic_android`` interface (this is what is used by the
default config).

.. code-block:: yaml

    device: generic_android

The device interface may be configured through the ``device_config`` setting,
whose value is a ``dict`` mapping setting names to their values. Some of the
most common parameters you might want to change are outlined below.
:device: If you have multiple Android devices connected to the host machine,
    you will need to set this to indicate to WA which device you want it to
    use. This will be the adb name that is displayed when running ``adb
    devices``.

:working_directory: WA needs a "working" directory on the device which it will
    use for collecting traces, caching assets it pushes to the device, etc. By
    default, it will create one under ``/sdcard`` which should be mapped and
    writable on standard Android builds. If this is not the case for your
    device, you will need to specify an alternative working directory (e.g.
    under ``/data/local``).

:load_default_modules: A number of "default" modules (e.g. for the cpufreq
    subsystem) are loaded automatically, unless explicitly disabled. If you
    encounter an issue with one of the modules then this setting can be set to
    ``False`` and any specific modules that you require can be requested via
    the ``modules`` entry.

:modules: A list of additional modules to be installed for the target. Devlib
    implements functionality for particular subsystems as modules. If
    additional modules need to be loaded, they may be specified using this
    parameter. Please see the `devlib documentation `_ for information on the
    available modules.

.. _core-names:

:core_names: ``core_names`` should be a list of core names matching the order
    in which they are exposed in sysfs. For example, the Arm TC2 SoC is a 2x3
    big.LITTLE system; its core_names would be ``['a7', 'a7', 'a7', 'a15',
    'a15']``, indicating that cpu0-cpu2 in the cpufreq sysfs structure are
    A7's and cpu3 and cpu4 are A15's.

    .. note:: This should not usually need to be provided as it will be
              automatically extracted from the target.

A typical ``device_config`` inside ``config.yaml`` may look something like

.. code-block:: yaml

    device_config:
        device: 0123456789ABCDEF
    # ...

or a more specific config could be:

.. code-block:: yaml

    device_config:
        device: 0123456789ABCDEF
        working_directory: '/sdcard/wa-working'
        load_default_modules: True
        modules: ['hotplug', 'cpufreq']
        core_names : ['a7', 'a7', 'a7', 'a15', 'a15']
    # ...

.. _configuring-android:

Configuring Android
^^^^^^^^^^^^^^^^^^^

There are a few additional tasks you may need to perform once you have a
device booted into Android (especially if this is an initial boot of a fresh
OS deployment):

- You have gone through FTU (first time usage) on the home screen and in the
  apps menu.
- You have disabled the screen lock.
- You have set sleep timeout to the highest possible value (30 mins on most
  devices).
- You have set the locale language to "English" (this is important for some
  workloads in which UI automation looks for specific text in UI elements).

Juno Setup
----------

.. note:: At the time of writing, the Android software stack on Juno was still
          very immature. Some workloads may not run, and there may be
          stability issues with the device.

The full software stack can be obtained from Linaro:

https://releases.linaro.org/android/images/lcr-reference-juno/latest/

Please follow the instructions on the "Binary Image Installation" tab on that
page. More up-to-date firmware and kernel may also be obtained by registered
members from ARM Connected Community: http://www.arm.com/community/ (though
this is not guaranteed to work with the Linaro file system).

UEFI
^^^^

Juno uses UEFI_ to boot the kernel image. UEFI supports multiple boot
configurations, and presents a menu on boot to select (in the default
configuration it will automatically boot the first entry in the menu if not
interrupted before a timeout).
WA will look for a specific entry in the UEFI menu (``'WA'`` by default, but
that may be changed by setting ``uefi_entry`` in the ``device_config``). When
following the UEFI instructions on the above Linaro page, please make sure to
name the entry appropriately (or to correctly set the ``uefi_entry``).

.. _UEFI: http://en.wikipedia.org/wiki/UEFI

There are two supported ways for Juno to discover kernel images through UEFI.
It can either load them from NOR flash on the board, or from the boot
partition on the file system. The setup described on the Linaro page uses the
boot partition method.

If WA does not find the UEFI entry it expects, it will create one. However, it
will assume that the kernel image resides in NOR flash, which means it will
not work with the Linaro file system. So if you're replicating the Linaro
setup exactly, you will need to create the entry manually, as outlined on the
above-linked page.

Rebooting
^^^^^^^^^

At the time of writing, normal Android reboot did not work properly on Juno
Android, causing the device to crash into an irrecoverable state. Therefore,
WA will perform a hard reset to reboot the device. It will attempt to do this
by toggling the DTR line on the serial connection to the device. In order for
this to work, you need to make sure that the SW1 configuration switch on the
back panel of the board (the right-most DIP switch) is toggled *down*.

Linux
-----

General Device Setup
^^^^^^^^^^^^^^^^^^^^

You can specify the device interface by setting the ``device`` setting in a
``config`` file or section. Available interfaces can be viewed by running the
``wa list targets`` command. If you don't see your specific platform listed
(which is likely unless you're using one of the Arm-supplied platforms), then
you should use the ``generic_linux`` interface.

.. code-block:: yaml

    device: generic_linux

The device interface may be configured through the ``device_config`` setting,
whose value is a ``dict`` mapping setting names to their values. Some of the
most common parameters you might want to change are outlined below.

:host: This should be either the DNS name or IP address of the device.

:username: The login name of the user on the device that WA will use. This
    user should have a home directory (unless an alternative working directory
    is specified using the ``working_directory`` config -- see below), and,
    for full functionality, the user should have sudo rights (WA will be able
    to use sudo-less accounts, but some instruments or workloads may not
    work).

:password: Password for the account on the device. Either this or a
    ``keyfile`` (see below) must be specified.

:keyfile: If key-based authentication is used, this may be used to specify the
    SSH identity file instead of the password.

:property_files: This is a list of paths that will be pulled for each WA run
    into the __meta subdirectory in the results. The intention is to collect
    meta-data about the device that may aid in reproducing the results later.
    The paths specified do not have to exist on the device (they will be
    ignored if they do not). The default list is ``['/proc/version',
    '/etc/debian_version', '/etc/lsb-release', '/etc/arch-release']``.

In addition, ``working_directory``, ``core_names``, ``modules`` etc. can also
be specified and have the same meaning as for Android devices (see above).

A typical ``device_config`` inside ``config.yaml`` may look something like

.. code-block:: yaml

    device_config:
        host: 192.168.0.7
        username: guest
        password: guest
    # ...
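If key-based authentication is used instead of a password, a sketch of the
equivalent configuration might look like the following (the key path here is
illustrative):

.. code-block:: yaml

    device_config:
        host: 192.168.0.7
        username: guest
        keyfile: /home/guest/.ssh/id_rsa
    # ...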
Chrome OS
---------

General Device Setup
^^^^^^^^^^^^^^^^^^^^

You can specify the device interface by setting the ``device`` setting in a
``config`` file or section. Available interfaces can be viewed by running the
``wa list targets`` command. If you don't see your specific platform listed
(which is likely unless you're using one of the Arm-supplied platforms), then
you should use the ``generic_chromeos`` interface.

.. code-block:: yaml

    device: generic_chromeos

The device interface may be configured through the ``device_config`` setting,
whose value is a ``dict`` mapping setting names to their values. The ChromeOS
target is essentially the same as a Linux device and requires a similar setup,
however it also optionally supports connecting to an Android container running
on the device, which will be automatically detected if present. If the device
supports Android applications, then the Android configuration is also
supported. In order to support this, WA will open 2 connections to the device:
one via SSH to the main OS, and another via ADB to the Android container,
where a limited subset of functionality can be performed.

In order to distinguish between the two connections, some of the
Android-specific configuration has been renamed to reflect the destination.

:android_working_directory: WA needs a "working" directory on the device which
    it will use for collecting traces, caching assets it pushes to the device,
    etc. By default, it will create one under ``/sdcard`` which should be
    mapped and writable on standard Android builds. If this is not the case
    for your device, you will need to specify an alternative working directory
    (e.g. under ``/data/local``).

A typical ``device_config`` inside ``config.yaml`` for a ChromeOS device may
look something like

.. code-block:: yaml

    device_config:
        host: 192.168.0.7
        username: root
        android_working_directory: '/sdcard/wa-working'
    # ...

.. note:: This assumes that your Chromebook is in developer mode and is
          configured to run an SSH server with the appropriate ssh keys added
          to the authorized_keys file on the device.

Related Settings
----------------

Reboot Policy
^^^^^^^^^^^^^

This indicates when during WA execution the device will be rebooted. By
default this is set to ``as_needed``, indicating that WA will only reboot the
device if it becomes unresponsive. Please see the ``reboot_policy``
documentation in :ref:`configuration-specification` for more details.

Execution Order
^^^^^^^^^^^^^^^

``execution_order`` defines the order in which WA will execute workloads.
``by_iteration`` (set by default) will execute the first iteration of each
spec first, followed by the second iteration of each spec (that defines more
than one iteration) and so forth. The alternative will loop through all
iterations for the first spec first, then move on to the second spec, etc.
Again, please see :ref:`configuration-specification` for more details.

Adding a new target interface
-----------------------------

If you are working with a particularly unusual device (e.g. an early-stage
development board) or need to be able to handle some quirk of your Android
build, the configuration available in the ``generic_android`` interface may
not be enough for you. In that case, you may need to write a custom interface
for your device. A device interface is an ``Extension`` (a plug-in) type in WA
and is implemented similarly to other extensions (such as workloads or
instruments). Please refer to the :ref:`adding a custom target ` section for
information on how this may be done.
================================================
FILE: doc/source/user_information/how_tos/revent.rst
================================================

.. _revent_files_creation:

Automating GUI Interactions With Revent
=======================================

Overview and Usage
------------------

The revent utility can be used to record and later play back a sequence of
user input events, such as key presses and touch screen taps. This is an
alternative to Android UI Automator for providing automation for workloads.

Using revent with workloads
^^^^^^^^^^^^^^^^^^^^^^^^^^^

Some workloads (pretty much all games) rely on recorded revents for their
execution. ReventWorkloads require between 1 and 4 revent files to be run.
There is one mandatory recording, ``run``, for performing the actual execution
of the workload; the remaining stages are optional. ``setup`` can be used to
perform the initial setup (navigating menus, selecting game modes, etc.).
``extract_results`` can be used to perform any actions after the main stage of
the workload, for example to navigate a results or summary screen of the app.
And finally ``teardown`` can be used to perform any final actions, for example
exiting the app.

Because revents are very device-specific\ [*]_, these files would need to be
recorded for each device. The files must be called
``<device name>.(setup|run|extract_results|teardown).revent``, where
``<device name>`` is the name of your device (as defined by the model name of
your device, which can be retrieved with ``adb shell getprop
ro.product.model``, or by the ``name`` attribute of your customized device
class).

WA will look for these files in two places:
``<install dir>/wa/workloads/<workload name>/revent_files`` and
``$WA_USER_DIRECTORY/dependencies/<workload name>``. The first location is
primarily intended for revent files that come with WA (and if you did a
system-wide install, you'll need sudo to add files there), so it's probably
easier to use the second location for the files you record. Also, if revent
files for a workload exist in both locations, the files under
``$WA_USER_DIRECTORY/dependencies`` will be used in favour of those installed
with WA.

.. [*] It's not just about screen resolution -- the event codes may be
       different even if devices use the same screen.

.. _revent-recording:

Recording
^^^^^^^^^

WA features a ``record`` command that will automatically deploy and start
revent on the target device. If you want to simply record a single recording
on the device, then the following command can be used, which will save the
recording in the current directory::

    wa record

There is one mandatory stage called 'run' and 3 optional stages: 'setup',
'extract_results' and 'teardown', which are used for playback of a workload.
The different stages are distinguished by the suffix in the recording file
path. To facilitate creating these recordings, you can specify ``--setup``,
``--extract-results``, ``--teardown`` or ``--all`` to indicate which stages
you would like to create recordings for, and the appropriate file name will be
generated.

You can also directly specify a workload to create recordings for, and WA will
walk you through the relevant steps. For example, if we wanted to create
recordings for the Angrybirds Rio workload, we can specify the workload with
the ``-w`` flag. In this case WA can be used to automatically deploy and
launch the workload and record the ``setup`` (``-s``), ``run`` (``-r``) and
``teardown`` (``-t``) stages for the workload.
In order to do this we would use the following command, with an example output
shown below::

    wa record -srt -w angrybirds_rio

::

    INFO     Setting up target
    INFO     Deploying angrybirds_rio
    INFO     Press Enter when you are ready to record SETUP...
    [Pressed Enter]
    INFO     Press Enter when you have finished recording SETUP...
    [Pressed Enter]
    INFO     Pulling 'setup.revent' from device
    INFO     Press Enter when you are ready to record RUN...
    [Pressed Enter]
    INFO     Press Enter when you have finished recording RUN...
    [Pressed Enter]
    INFO     Pulling '.run.revent' from device
    INFO     Press Enter when you are ready to record TEARDOWN...
    [Pressed Enter]
    INFO     Press Enter when you have finished recording TEARDOWN...
    [Pressed Enter]
    INFO     Pulling '.teardown.revent' from device
    INFO     Tearing down angrybirds_rio
    INFO     Recording(s) are available at: '$WA_USER_DIRECTORY/dependencies/angrybirds_rio/revent_files'

Once you have made your desired recordings, you can either manually play back
individual recordings using the :ref:`replay ` command or, with the recordings
in the appropriate dependencies location, simply run the workload using the
:ref:`run ` command and all the available recordings will be played back
automatically.

For more information on available arguments please see the :ref:`Record `
command.

.. note:: By default revent recordings are not portable across devices and
          therefore will require recording for each new device you wish to
          use the workload on. Alternatively, a "gamepad" recording mode is
          also supported. This mode requires a gamepad to be connected to the
          device when recording, but the recordings produced in this mode
          should be portable across devices.

.. _revent_replaying:

Replaying
^^^^^^^^^

If you want to replay a single recorded file, you can use ``wa replay``,
providing it with the file you want to replay. An example of the command
output is shown below::

    wa replay my_recording.revent

::

    INFO     Setting up target
    INFO     Pushing file to target
    INFO     Starting replay
    INFO     Finished replay

If you are using a device that supports Android, you can optionally specify a
package name to launch before replaying the recording.

If you have recorded the required files for your workload and have placed them
in the appropriate location (or specified the workload during recording), then
you can simply run the relevant workload and your recordings will be replayed
at the appropriate times automatically.

For more information please read :ref:`replay-command`.

Revent vs UiAutomator
----------------------

In general, Android UI Automator is the preferred way of automating user input
for Android workloads because, unlike revent, UI Automator does not depend on
a particular screen resolution, and so is more portable across different
devices. It also gives better control and can potentially be faster for doing
UI manipulations, as input events are scripted based on the available UI
elements, rather than generated by human input.

On the other hand, revent can be used to manipulate pretty much any workload,
whereas UI Automator only works for Android UI elements (such as text boxes or
radio buttons), which makes the latter useless for things like games.
Recording a revent sequence is also faster than writing automation code (on
the other hand, one would need to maintain a different revent log for each
screen resolution).

.. note:: For ChromeOS targets, UI Automator can only be used with Android
          applications and not the ChromeOS host applications themselves.
================================================
FILE: doc/source/user_information/installation.rst
================================================

.. _installation:

************
Installation
************

.. contents:: Contents
   :depth:  2
   :local:

.. module:: wa

This page describes the 3 methods of installing Workload Automation 3: using
:ref:`pip`, which will install the latest release of WA; installing the latest
development version from :ref:`github `; or via a :ref:`dockerfile`.

Prerequisites
=============

Operating System
----------------

WA runs on a native Linux install. It has been tested on recent Ubuntu
releases, but other recent Linux distributions should work as well. It should
run on either a 32-bit or 64-bit OS, provided the correct versions of the
dependencies (see below) are installed. Officially, **other environments are
not supported**. WA has been known to run on Linux Virtual machines and in
Cygwin environments, though additional configuration may be required in both
cases (known issues include making sure USB/serial connections are passed to
the VM, and the wrong python/pip binaries being picked up in Cygwin). WA
*should* work on other Unix-based systems such as BSD or Mac OS X, but it has
not been tested in those environments. WA *does not* run on Windows (though it
should be possible to get limited functionality with minimal porting effort).

.. Note:: If you plan to run Workload Automation on Linux devices only, SSH is
          required, and the Android SDK is optional if you wish to run WA on
          Android devices at a later time. Then follow the steps to install
          the necessary python packages to set up WA.

          However, you would be starting off with a limited number of
          workloads that will run on Linux devices.

Android SDK
-----------

To interact with Android devices you will need to have the Android SDK with at
least one platform installed. To install it, download the ADT Bundle from
here_. Extract it and add ``<path_to_android_sdk>/sdk/platform-tools`` and
``<path_to_android_sdk>/sdk/tools`` to your ``PATH``. To test that you've
installed it properly, run ``adb version``. The output should be similar to
this::

    adb version
    Android Debug Bridge version 1.0.39

.. _here: https://developer.android.com/sdk/index.html

Once that is working, run ::

    android update sdk

This will open up a dialog box listing available android platforms and
corresponding API levels, e.g. ``Android 4.3 (API 18)``. For WA, you will need
at least API level 18 (i.e. Android 4.3), though installing the latest is
usually the best bet.

Optionally (but recommended), you should also set ``ANDROID_HOME`` to point to
the install location of the SDK (i.e. ``<path_to_android_sdk>/sdk``).

Python
------

Workload Automation 3 currently supports Python 3.5+

.. note:: If your system's default python version is still Python 2, please
          replace the commands listed here with their Python3 equivalent
          (e.g. python3, pip3, etc.)

.. _pip:

pip
---

pip is the recommended package manager for Python. It is not part of the
standard Python distribution and would need to be installed separately. On
Ubuntu and similar distributions, this may be done with APT::

    sudo apt-get install python-pip

.. note:: Some versions of pip (in particular v1.5.4, which comes with Ubuntu
          14.04) are known to set the wrong permissions when installing
          packages, resulting in WA failing to import them.
To avoid this it is recommended that you update pip and setuptools before
proceeding with installation::

    sudo -H pip install --upgrade pip
    sudo -H pip install --upgrade setuptools

If you do run into this issue after already installing some packages, you can
resolve it by running ::

    sudo chmod -R a+r /usr/local/lib/python3.X/dist-packages
    sudo find /usr/local/lib/python3.X/dist-packages -type d -exec chmod a+x {} \;

(The paths above will work for Ubuntu; they may need to be adjusted for other
distros).

Python Packages
---------------

.. note:: pip should automatically download and install missing dependencies,
          so if you're using pip, you can skip this section. However, some of
          the packages that will be installed have C extensions and will
          require Python development headers to install. You can get those by
          installing the ``python-dev`` package in apt on Ubuntu (or the
          equivalent for your distribution).

Workload Automation 3 depends on the following additional libraries:

* pexpect
* docutils
* pySerial
* pyYAML
* python-dateutil
* louie
* pandas
* devlib
* wrapt
* requests
* colorama
* future

You can install these with pip::

    sudo -H pip install pexpect
    sudo -H pip install pyserial
    sudo -H pip install pyyaml
    sudo -H pip install docutils
    sudo -H pip install python-dateutil
    sudo -H pip install devlib
    sudo -H pip install pandas
    sudo -H pip install louie
    sudo -H pip install wrapt
    sudo -H pip install requests
    sudo -H pip install colorama
    sudo -H pip install future

Some of these may also be available in your distro's repositories, e.g. ::

    sudo apt-get install python-serial

Distro package versions tend to be older, so pip installation is recommended.
However, pip will always download and try to build the source, so in some
situations distro binaries may provide an easier fall back. Please also note
that distro package names may differ from pip packages.

Optional Python Packages
------------------------

.. note:: Unlike the mandatory dependencies in the previous section, pip will
          *not* install these automatically, so you will have to explicitly
          install them if/when you need them.

In addition to the mandatory packages listed in the previous sections, some WA
functionality (e.g. certain plugins) may have additional dependencies. Since
they are not necessary to be able to use most of WA, they are not made
mandatory in order to simplify initial WA installation. If you try to use a
plugin that has additional, unmet dependencies, WA will tell you before
starting the run, and you can install them then. They are listed here for
those that would rather install them upfront (e.g. if you're planning to use
WA in an environment that may not always have Internet access).

* nose
* mock
* daqpower
* sphinx
* sphinx_rtd_theme
* psycopg2-binary

.. _github:

Installing
==========

Installing the latest released version from PyPI (Python Package Index)::

    sudo -H pip install wlauto

This will install WA along with its mandatory dependencies. If you would like
to install all optional dependencies at the same time, do the following
instead::

    sudo -H pip install wlauto[all]

Alternatively, you can also install the latest development version from GitHub
(you will need git installed for this to work)::

    git clone git@github.com:ARM-software/workload-automation.git workload-automation
    cd workload-automation
    sudo -H python setup.py install

.. note:: Please note that if using pip to install from github, this will most
          likely result in an older and incompatible version of devlib being
          installed alongside WA.
If you wish to use pip, please also manually install the latest version of
`devlib `_.

.. note:: Please note that while a `requirements.txt` is included, this is
          designed to be a reference of known working packages rather than to
          be used as part of a standard installation. The version restrictions
          in place as part of `setup.py` should automatically ensure the
          correct packages are installed; however, if you encounter issues,
          please try updating/downgrading to the package versions listed
          within.

If the above succeeds, try ::

    wa --version

Hopefully, this should output something along the lines of ::

    "Workload Automation version $version".

.. _dockerfile:

Dockerfile
============

As an alternative, we also provide a Dockerfile that will create an image
called wadocker, preconfigured to run WA and devlib. Please note that the
build process automatically accepts the licenses for the Android SDK, so
please be sure that you are willing to accept these prior to building and
running the image in a container.

The Dockerfile can be found in the "extras" directory or online at ``_, which
contains additional information about how to build and to use the file.

(Optional) Post Installation
============================

Some WA plugins have additional dependencies that need to be satisfied before
they can be used. Not all of these can be provided with WA and so will need to
be supplied by the user. They should be placed into
``~/.workload_automation/dependencies/`` so that WA can find them (you may
need to create the directory if it doesn't already exist). You only need to
provide the dependencies for workloads you want to use.

.. _apk_files:

APK Files
---------

APKs are application packages used by Android. These are necessary to install
on a device when running an :ref:`ApkWorkload ` or derivative. Please see the
workload description, using the :ref:`show ` command, to see which version of
the apk the UI automation has been tested with, and place the apk in the
corresponding workload's dependency directory. Automation may also work with
other versions (especially if it's only a minor or revision difference --
major version differences are more likely to contain incompatible UI changes),
but this has not been tested. As a general rule, we do not guarantee support
for the latest version of an app, and apps are updated on an as-needed basis.
We do, however, attempt to maintain backwards compatibility with previous
major releases; beyond this, support will likely be dropped.

Gaming Workloads
----------------

Some workloads (games, demos, etc.) cannot be automated using Android's
UIAutomator framework because they render the entire UI inside a single OpenGL
surface. For these, an interaction session needs to be recorded so that it can
be played back by WA. These recordings are device-specific, so they would need
to be done for each device you're planning to use. The tool for doing this is
``revent``, and it is packaged with WA. You can find instructions on how to
use it in the :ref:`How To ` section.

This is the list of workloads that rely on such recordings:

+------------------+
| angrybirds_rio   |
+------------------+
| templerun2       |
+------------------+
.. _assets_repository:

Maintaining Centralized Assets Repository
-----------------------------------------

If there are multiple users within an organization that may need to deploy
assets for WA plugins, that organization may wish to maintain a centralized
repository of assets that individual WA installs will be able to automatically
retrieve asset files from as they are needed. This repository can be any
directory on a network filer that mirrors the structure of
``~/.workload_automation/dependencies``, i.e. it has subdirectories named
after the plugins whose assets they contain. Individual WA installs can then
set the ``remote_assets_path`` setting in their config to point to the local
mount of that location.

(Optional) Uninstalling
=======================

If you have installed Workload Automation via ``pip`` and wish to remove it,
run this command to uninstall it::

    sudo -H pip uninstall wa

.. Note:: This will *not* remove any user configuration (e.g. the
          ~/.workload_automation directory).

(Optional) Upgrading
====================

To upgrade Workload Automation to the latest version via ``pip``, run::

    sudo -H pip install --upgrade --no-deps wa

================================================
FILE: doc/source/user_information/user_guide.rst
================================================

.. _user-guide:

**********
User Guide
**********

This guide will show you how to quickly start running workloads using Workload
Automation 3.

.. contents:: Contents
   :depth:  2
   :local:

---------------------------------------------------------------

Install
=======

.. note:: This is a quick summary. For more detailed instructions, please see
          the :ref:`installation` section.

Make sure you have Python 3.5+ and a recent Android SDK with API level 18 or
above installed on your system. A complete install of the Android SDK is
required, as WA uses a number of its utilities, not just adb. For the SDK,
make sure that either the ``ANDROID_HOME`` environment variable is set, or
that ``adb`` is in your ``PATH``.

.. Note:: If you plan to run Workload Automation on Linux devices only, SSH is
          required, and the Android SDK is optional if you wish to run WA on
          Android devices at a later time.

          However, you would be starting off with a limited number of
          workloads that will run on Linux devices.

In addition to the base Python install, you will also need to have ``pip``
(Python's package manager) installed as well. This is usually a separate
package.

Once you have those, you can install WA with::

    sudo -H pip install wlauto

This will install Workload Automation on your system, along with its mandatory
dependencies.

Alternatively, we provide a Dockerfile that can be used to create a Docker
image for running WA along with its dependencies. More information can be
found in the :ref:`Installation ` section.

(Optional) Verify installation
-------------------------------

Once WA has been installed, try executing ::

    wa -h

You should see a help message outlining available subcommands.

(Optional) APK files
--------------------

A large number of WA workloads are installed as APK files. These cannot be
distributed with WA and so you will need to obtain those separately.

For more details, please see the :ref:`installation ` section.

List Command
============

In order to get started with using WA we first need to find out what is
available to use. In order to do this we can use the :ref:`list ` command
followed by the type of plugin that you wish to see.
For example, to see what workloads are available along with a short
description of each, you run::

    wa list workloads

Which will give an output in the format of:

.. code-block:: none

    adobereader:    The Adobe Reader workflow carries out the following typical
                    productivity tasks.
    androbench:     Executes storage performance benchmarks
    angrybirds_rio: Angry Birds Rio game.
    antutu:         Executes Antutu 3D, UX, CPU and Memory tests
    applaunch:      This workload launches and measures the launch time of
                    applications for supporting workloads.
    benchmarkpi:    Measures the time the target device takes to run and
                    complete the Pi calculation algorithm.
    dhrystone:      Runs the Dhrystone benchmark.
    exoplayer:      Android ExoPlayer
    geekbench:      Geekbench provides a comprehensive set of benchmarks
                    engineered to quickly and accurately measure processor and
                    memory performance.
    #..

The same syntax can be used to display ``commands``,
``energy_instrument_backends``, ``instruments``, ``output_processors``,
``resource_getters`` and ``targets``. Once you have found the plugin you are
looking for, you can use the :ref:`show ` command to display more detailed
information. Alternatively, please see the :ref:`Plugin Reference ` for an
online version.

Show Command
============

If you want to learn more about a particular plugin, such as the parameters it
supports, you can use the "show" command::

    wa show dhrystone

If you have ``pandoc`` installed on your system, this will display a man
page-like description of the plugin and the parameters it supports. If you do
not have ``pandoc``, you will instead see the same information as raw
restructured text.

Configure Your Device
=====================

There are multiple options for configuring your device, depending on your
particular use case.

You can either add your configuration to the default configuration file
``config.yaml``, under the ``$WA_USER_DIRECTORY/`` directory, or you can
specify it in the ``config`` section of your agenda directly.

Alternatively, if you are using multiple devices, you may want to create
separate config files for each of the devices you will be using. This allows
you to specify which device you would like to use for a particular run and
pass it as an argument when invoking with the ``-c`` flag. ::

    wa run dhrystone -c my_device.yaml

By default WA will use the "most specific" configuration available: for
example, any configuration specified inside an agenda will override a passed
configuration file, which will in turn override the default configuration
file.

.. note:: For more information about configuring your device please see
          :ref:`Setting Up A Device `.

Android
-------

By default, the device WA will use is set to 'generic_android'. WA is
configured to work with a generic Android device through ``adb``. If you only
have one device listed when you execute ``adb devices``, and your device has a
standard Android configuration, then no extra configuration is required.

However, if your device is connected via network, you will have to manually
execute ``adb connect <device ip>`` (or specify this in your :ref:`agenda `)
so that it appears in the device listing.

If you have multiple devices connected, you will need to tell WA which one you
want it to use. You can do that by setting ``device`` in the device_config
section.

.. code-block:: yaml

    # ...

    device_config:
        device: 'abcdef0123456789'
        # ...
    # ...

Linux
-----

First, set the device to 'generic_linux'

.. code-block:: yaml

    # ...
    device: 'generic_linux'
    # ...

Find the device_config section and add these parameters
.. code-block:: yaml

    # ...
    device_config:
        host: '192.168.0.100'
        username: 'root'
        password: 'password'
        # ...
    # ...

Parameters:

- Host is the IP of your target Linux device
- Username is the user for the device
- Password is the password for the device

Enabling and Disabling Augmentations
---------------------------------------

Augmentations is the collective name for "instruments" and "output
processors" in WA3.

Some augmentations are enabled by default after your initial install of WA;
these are specified in the ``config.yaml`` file located in your
``WA_USER_DIRECTORY``, typically ``~/.workload_automation``.

.. note:: Some Linux devices may not be able to run certain augmentations
          provided by WA (e.g. cpufreq is disabled or unsupported by the
          device).

.. code-block:: yaml

    # ...

    augmentations:
        # Records the time it took to run the workload
        - execution_time

        # Collects /proc/interrupts before and after execution and does a diff.
        - interrupts

        # Collects the contents of /sys/devices/system/cpu before and after
        # execution and does a diff.
        - cpufreq

        # Generate a txt file containing general status information about
        # which runs failed and which were successful.
        - status

    # ...

If you only wanted to keep the 'execution_time' instrument enabled, you can
comment out the rest of the listed augmentations to disable them.

This should give you basic functionality. If you are working with a
development board or you need some advanced functionality, additional
configuration may be required. Please see the :ref:`device setup ` section for
more details.

.. note:: In WA2, 'Instrumentation' and 'Result Processors' were divided up
          into their own sections in the agenda. In WA3 they now fall under
          the same category of 'augmentations'. For compatibility the old
          naming structure is still valid, however using the new entry names
          is recommended.

Running Your First Workload
===========================

The simplest way to run a workload is to specify it as a parameter to the WA
:ref:`run ` sub-command::

    wa run dhrystone

You will see INFO output from WA as it executes each stage of the run. A
completed run output should look something like this::

    INFO     Creating output directory.
    INFO     Initializing run
    INFO     Connecting to target
    INFO     Setting up target
    INFO     Initializing execution context
    INFO     Generating jobs
    INFO         Loading job wk1 (dhrystone) [1]
    INFO     Installing instruments
    INFO     Installing output processors
    INFO     Starting run
    INFO     Initializing run
    INFO         Initializing job wk1 (dhrystone) [1]
    INFO     Running job wk1
    INFO         Configuring augmentations
    INFO         Configuring target for job wk1 (dhrystone) [1]
    INFO         Setting up job wk1 (dhrystone) [1]
    INFO         Running job wk1 (dhrystone) [1]
    INFO         Tearing down job wk1 (dhrystone) [1]
    INFO         Completing job wk1
    INFO     Job completed with status OK
    INFO     Finalizing run
    INFO         Finalizing job wk1 (dhrystone) [1]
    INFO     Done.
    INFO     Run duration: 9 seconds
    INFO     Ran a total of 1 iterations: 1 OK
    INFO     Results can be found in wa_output

Once the run has completed, you will find a directory called ``wa_output`` in
the location where you have invoked ``wa run``. Within this directory, you
will find a "results.csv" file which will contain results obtained for
dhrystone, as well as a "run.log" file containing detailed log output for the
run. You will also find a sub-directory called 'wk1-dhrystone-1' that contains
the results for that iteration. Finally, you will find various additional
information in the ``wa_output/__meta`` subdirectory, for example information
extracted from the target and a copy of the agenda file.
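Putting that together, the top level of the output directory for the run above
would look roughly like this (a sketch only -- the exact contents depend on
the augmentations that were enabled for the run)::

    wa_output/
    ├── __meta/            # run configuration, target info and a copy of the agenda
    ├── wk1-dhrystone-1/   # results and artifacts for the single dhrystone iteration
    ├── results.csv
    ├── run.log
    └── status.txt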
The contents of iteration-specific subdirectories will vary from workload to
workload, and, along with the contents of the main output directory, will
depend on the augmentations that were enabled for that run.

The ``run`` sub-command takes a number of options that control its behaviour;
you can view those by executing ``wa run -h``. Please see the
:ref:`invocation` section for details.

Create an Agenda
================

Simply running a single workload is normally of little use. Typically, you
would want to specify several workloads, set up the device state and,
possibly, enable additional augmentations. To do this, you would need to
create an "agenda" for the run that outlines everything you want WA to do.

Agendas are written using the YAML_ markup language. A simple agenda might
look like this:

.. code-block:: yaml

    config:
        augmentations:
            - ~execution_time
            - targz
        iterations: 2
    workloads:
        - memcpy
        - name: dhrystone
          params:
              mloops: 5
              threads: 1

This agenda:

- Specifies two workloads: memcpy and dhrystone.
- Specifies that dhrystone should run in one thread and execute five million
  loops.
- Specifies that each of the two workloads should be run twice.
- Enables the targz output processor, in addition to the output processors
  enabled in the config.yaml.
- Disables the execution_time instrument, if it is enabled in the config.yaml.

An agenda can be created using WA's ``create`` :ref:`command ` or in a text
editor and saved as a YAML file.

For more options please see the :ref:`agenda` documentation.

.. _YAML: http://en.wikipedia.org/wiki/YAML

.. _using-the-create-command:

Using the Create Command
-------------------------

The easiest way to create an agenda is to use the 'create' command. For more
in-depth information please see the :ref:`Create Command ` documentation.

In order to populate the agenda with relevant information you can supply all
of the plugins you wish to use as arguments to the command. For example, if we
want to create an agenda file for running ``dhrystone`` on a
``generic_android`` device, and we want to enable the ``execution_time`` and
``trace-cmd`` instruments and display the metrics using the ``csv`` output
processor, we would use the following command::

    wa create agenda generic_android dhrystone execution_time trace-cmd csv -o my_agenda.yaml

This will produce a ``my_agenda.yaml`` file containing all the relevant
configuration for the specified plugins, along with their default values, as
shown below:

.. code-block:: yaml

    config:
        augmentations:
        - execution_time
        - trace-cmd
        - csv
        iterations: 1
        device: generic_android
        device_config:
            adb_server: null
            adb_port: null
            big_core: null
            core_clusters: null
            core_names: null
            device: null
            disable_selinux: true
            executables_directory: null
            load_default_modules: true
            logcat_poll_period: null
            model: null
            modules: null
            package_data_directory: /data/data
            shell_prompt: ! '8:^.*(shell|root)@.*:/\S* [#$] '
            working_directory: null
        execution_time: {}
        trace-cmd:
            buffer_size: null
            buffer_size_step: 1000
            events:
            - sched*
            - irq*
            - power*
            - thermal*
            functions: null
            no_install: false
            report: true
            report_on_target: false
            mode: write-to-memory
        csv:
            extra_columns: null
            use_all_classifiers: false
    workloads:
    -   name: dhrystone
        params:
            cleanup_assets: true
            delay: 0
            duration: 0
            mloops: 0
            taskset_mask: 0
            threads: 4

Run Command
============

These examples show some useful options that can be used with WA's ``run``
command.

Once we have created an agenda, we can pass it as an argument to the run
command::

    wa run <path/to/agenda>
(e.g. ``wa run ~/myagenda.yaml``).

By default WA will use the "wa_output" directory to store its output; however,
to redirect the output to a different directory we can use::

    wa run dhrystone -d my_output_directory

We can also tell WA to use additional config files by supplying the ``-c``
argument. One use case for passing additional config files is if you have
multiple devices you wish to test with WA: you can store the relevant device
configuration in individual config files and then pass the file corresponding
to the device you wish to use for that particular test.

.. note:: As previously mentioned, any more specific configuration present in
          the agenda file will overwrite the corresponding config parameters
          specified in the config file(s).

::

    wa run -c myconfig.yaml ~/myagenda.yaml

To use the same output directory but overwrite the existing contents to store
new dhrystone results, we can specify the ``-f`` argument::

    wa run -f dhrystone

To display verbose output while running memcpy::

    wa run --verbose memcpy

.. _output_directory:

Output
======

The output directory will contain subdirectories for each job that was run,
which will in turn contain the generated metrics and artifacts for each job.
The directory will also contain a ``run.log`` file containing the complete log
output for the run, and a ``__meta`` directory with the configuration and
metadata for the run. Metrics are serialized inside ``result.json`` files
inside each job's subdirectory. There may also be a ``__failed`` directory
containing failed attempts for jobs that have been re-run.

Augmentations may add additional files at the run or job directory level. The
default configuration has the ``status`` and ``csv`` augmentations enabled,
which generate a ``status.txt`` containing a status summary for the run and
individual jobs, and a ``results.csv`` containing metrics from all jobs in a
CSV table, respectively.

See :ref:`output_directory_structure` for more information.

In order to make it easier to access WA results from scripts, WA provides an
API that parses the contents of the output directory:

.. code-block:: pycon

    >>> from wa import RunOutput
    >>> ro = RunOutput('./wa_output')
    >>> for job in ro.jobs:
    ...     if job.status != 'OK':
    ...         print('Job "{}" did not complete successfully: {}'.format(job, job.status))
    ...         continue
    ...     print('Job "{}":'.format(job))
    ...     for metric in job.metrics:
    ...         if metric.units:
    ...             print('\t{}: {} {}'.format(metric.name, metric.value, metric.units))
    ...         else:
    ...             print('\t{}: {}'.format(metric.name, metric.value))
    ...
    Job "wk1-dhrystone-1":
         thread 0 score: 20833333
         thread 0 DMIPS: 11857
         thread 1 score: 24509804
         thread 1 DMIPS: 13950
         thread 2 score: 18011527
         thread 2 DMIPS: 10251
         thread 3 score: 26371308
         thread 3 DMIPS: 15009
         time: 1.001251 seconds
         total DMIPS: 51067
         total score: 89725972
         execution_time: 1.4834280014 seconds

See :ref:`output_processing_api` for details.

Uninstall
=========

If you have installed Workload Automation via ``pip``, then run this command
to uninstall it::

    sudo pip uninstall wa

.. Note:: It will *not* remove any user configuration (e.g. the
          ~/.workload_automation directory).

Upgrade
=======

To upgrade Workload Automation to the latest version via ``pip``, run::

    sudo pip install --upgrade --no-deps wa

================================================
FILE: doc/source/user_information/user_reference/agenda.rst
================================================
.. _agenda-reference: Agenda ------ An agenda can be thought of as a way to define an experiment, as it specifies what is to be done during a Workload Automation run. This includes which workloads will be run, with what configuration, and which augmentations will be enabled, etc. Agenda syntax is designed to be both succinct and expressive and is written using YAML notation. There are three valid top level entries which are: :ref:`config <config-agenda-entry>`, :ref:`workloads <workloads-agenda-entry>`, :ref:`sections <sections-agenda-entry>`. An example agenda can be seen here: .. code-block:: yaml config: # General configuration for the run user_directory: ~/.workload_automation/ default_output_directory: 'wa_output' augmentations: # A list of all augmentations to be enabled and disabled. - trace-cmd - csv - ~dmesg # Disable the dmesg augmentation iterations: 1 # How many iterations to run each workload by default device: generic_android device_config: device: R32C801B8XY # The adb name of our device we want to run on disable_selinux: true load_default_modules: true package_data_directory: /data/data trace-cmd: # Provide config for the trace-cmd augmentation. buffer_size_step: 1000 events: - sched* - irq* - power* - thermal* no_install: false report: true report_on_target: false mode: write-to-disk csv: # Provide config for the csv augmentation use_all_classifiers: true sections: # Configure what sections we want and their settings - id: LITTLES # Run workloads just on the LITTLE cores runtime_parameters: # Supply RT parameters to be used for this section num_little_cores: 4 num_big_cores: 0 - id: BIGS # Run workloads just on the big cores runtime_parameters: # Supply RT parameters to be used for this section num_big_cores: 4 num_little_cores: 0 workloads: # List which workloads should be run - name: benchmarkpi augmentations: - ~trace-cmd # Disable the trace-cmd instrument for this workload iterations: 2 # Override the global number of iterations for this workload params: # Specify workload parameters for this workload cleanup_assets: true exact_abi: false force_install: false install_timeout: 300 markers_enabled: false prefer_host_package: true strict: false uninstall: false - dhrystone # Run the dhrystone workload with all default config This agenda will result in a total of 6 jobs being executed on our Android device, 4 of which run the BenchmarkPi workload with its customized workload parameters and 2 of which run dhrystone with its default configuration. The first 3 will run on only the little cores and the latter 3 on only the big cores. For all of the jobs executed, the output will be processed by the ``csv`` processor (plus any additional processors enabled in the default config file); however, trace data will only be collected for the dhrystone jobs. .. _config-agenda-entry: config ^^^^^^^ This section is used to provide overall configuration for WA and its run. The ``config`` section of an agenda will be merged with any other configuration files provided (including the default config file), with the most specific configuration taking precedence (see :ref:`Config Merging <config-merging>` for more information). The only restriction is that ``run_name`` can only be specified in the config section of an agenda, as this would not make sense to set as a default. Within this section there are multiple distinct types of configuration that can be provided. However, in addition to the options listed here, all configuration that is available for :ref:`sections <sections-agenda-entry>` can also be entered here and will be globally applied.
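For example, section-level options such as ``iterations`` or ``runtime_parameters`` can be placed directly in ``config`` to apply to every job in the run; a minimal sketch (the values are purely illustrative):

.. code-block:: yaml

    config:
        iterations: 3                # applied to every workload spec
        runtime_parameters:
            screen_on: true          # applied to the device for every job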
Configuration """"""""""""" The first is to configure the behaviour of WA and how a run as a whole will behave. The most common options that you may want to specify are: :device: The name of the 'device' that you wish to perform the run on. This name is a combination of a devlib `Platform `_ and `Target `_. To see the available options please use ``wa list targets``. :device_config: This is a dict mapping allowing you to configure which target to connect to (e.g. ``host`` for an SSH connection or ``device`` to specify an ADB name) as well as configure other options for the device, for example the ``working_directory`` or the list of ``modules`` to be loaded onto the device. (For more information please see :ref:`here `) :execution_order: Defines the order in which the agenda spec will be executed. :reboot_policy: Defines when during execution of a run a Device will be rebooted. :max_retries: The maximum number of times failed jobs will be retried before giving up. :allow_phone_home: Prevent running any workloads that are marked with ‘phones_home’. For more information and a full list of these configuration options please see :ref:`Run Configuration <run-configuration>` and :ref:`Meta Configuration <meta-configuration>`. Plugins """"""" :augmentations: Specify a list of which augmentations should be enabled (or if prefixed with a ``~``, disabled). .. note:: While augmentations can be enabled and disabled on a per workload basis, they cannot yet be re-configured part way through a run, and the configuration provided as part of an agenda config section or separate config file will be used for all jobs in a WA run. :<plugin_name>: You can also use this section to supply configuration for specific plugins, such as augmentations, workloads, resource getters etc. To do this the plugin name you wish to configure should be provided as an entry in this section and should contain a mapping of configuration options to their desired settings. If configuration is supplied for a plugin that is not currently enabled then it will simply be ignored. This allows for plugins to be temporarily removed without also having to remove their configuration, or to provide a set of defaults for a plugin which can then be overridden. :<global_alias>: Some plugins provide global aliases which can set one or more configuration options at once, and these can also be specified here. For example if you specify a value for the entry ``remote_assets_url`` this will set the URL the http resource getter will use when searching for any missing assets.
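To illustrate, here is a minimal sketch of a ``config`` section that combines run options, plugin configuration, and a global alias (the URL is a placeholder):

.. code-block:: yaml

    config:
        max_retries: 1
        augmentations:
            - csv
        csv:                         # plugin-specific configuration
            use_all_classifiers: true
        remote_assets_url: https://example.com/wa-assets   # a global alias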
--------------------------- .. _workloads-agenda-entry: workloads ^^^^^^^^^ Here you can specify a list of workloads to be run. If you wish to run a workload with all default values then you can specify the workload name directly as an entry, otherwise a dict mapping should be provided. Any settings provided here will be the most specific and therefore override any other more generalised configuration for that particular workload spec. The valid entries are as follows: :workload_name: **(Mandatory)** The name of the workload to be run. :iterations: Specify how many iterations the workload should be run. :label: Similar to IDs but do not have the uniqueness restriction. If specified, labels will be used by some output processors instead of (or in addition to) the workload name. For example, the csv output processor will put the label in the "workload" column of the CSV file. :augmentations: The instruments and output processors to enable (or disable, using a ``~``) during this workload. :classifiers: Classifiers allow you to tag metrics from this workload spec, and are often used to help identify what runtime parameters were used when post processing results. :workload_parameters: Any parameters to configure that particular workload, in dict form. Alias: ``workload_params`` .. note:: You can see available parameters for a given workload with the :ref:`show command <show-command>` or look it up in the :ref:`Plugin Reference `. :runtime_parameters: A dict mapping of any runtime parameters that should be set for the device for that particular workload. For available parameters please see :ref:`runtime parameters <runtime-parameters>`. Alias: ``runtime_params`` .. note:: Unless specified elsewhere these configurations will not be undone once the workload has finished. I.e. if the frequency of a core is changed it will remain at that frequency until otherwise changed. .. note:: There is also a shorter ``params`` alias available; however, this alias will be interpreted differently depending on whether it is used in a workload entry, in which case it will be interpreted as ``workload_params``, or at global config or section (see below) level, in which case it will be interpreted as ``runtime_params``. --------------------------- .. _sections-agenda-entry: sections ^^^^^^^^ Sections are used for grouping sets of configuration together in order to reduce the need for duplicated configuration (for more information please see :ref:`Sections `). Each section specified will be applied for each entry in the ``workloads`` section. The valid configuration entries are the same as those of the ``workloads`` section described above, except you can additionally specify: :workloads: An entry which can be provided with the same configuration entries as the :ref:`workloads <workloads-agenda-entry>` top level entry.
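As a short sketch, a ``sections`` entry may carry its own workloads in addition to the global ones (the names and values here are illustrative):

.. code-block:: yaml

    sections:
        - id: baseline
          runtime_parameters:
              screen_on: false
          workloads:
              - memcpy               # run only as part of this section
    workloads:
        - dhrystone                  # run in every section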
================================================ FILE: doc/source/user_information/user_reference/configuration.rst ================================================ .. _configuration-specification: Configuration ============= .. include:: user_information/user_reference/agenda.rst --------------------- .. _run-configuration: Run Configuration ------------------ In addition to specifying run execution parameters through an agenda, the behaviour of WA can be modified through configuration file(s). The default configuration file is ``~/.workload_automation/config.yaml`` (the location can be changed by setting the ``WA_USER_DIRECTORY`` environment variable, see the :ref:`envvars` section below). This file will be created when you first run WA if it does not already exist. This file must always exist and will always be loaded. You can add to or override the contents of that file on invocation of Workload Automation by specifying an additional configuration file using the ``--config`` option. Variables with specific names will be picked up by the framework and used to modify the behaviour of Workload Automation, e.g. the ``iterations`` variable might be specified to tell WA how many times to run each workload. --------------------- .. _available_settings: .. include:: run_config/Run_Configuration.rst --------------------- .. _meta-configuration: Meta Configuration ------------------ There are also a couple of settings that are used to provide additional metadata for a run. These may get picked up by instruments or output processors to attach context to results. .. include:: run_config/Meta_Configuration.rst --------------------- .. _envvars: Environment Variables --------------------- In addition to the standard configuration described above, WA behaviour can be altered through environment variables. These can determine where WA looks for various assets when it starts. :WA_USER_DIRECTORY: This is the location WA will look for config.yaml, plugins, dependencies, and it will also be used for local caches, etc. If this variable is not set, the default location is ``~/.workload_automation`` (this is created when WA is installed). .. note:: This location **must** be writable by the user who runs WA. :WA_LOG_BUFFER_CAPACITY: Specifies the capacity (in log records) for the early log handler which is used to buffer log records until a log file becomes available. If this is not set, the default value of ``1000`` will be used. This should be sufficient for most scenarios; however, it may need to be increased, e.g. if the plugin loader scans a very large number of locations; it may also be set to a lower value to reduce WA's memory footprint on memory-constrained hosts. --------------------- .. include:: user_information/user_reference/runtime_parameters.rst --------------------- .. _config-merging: Configuration Merging --------------------- WA configuration can come from various sources of increasing priority, as well as being specified in a generic and specific manner. For example, WA's global config file would be considered the least specific, whereas the parameters of a workload in an agenda would be the most specific. WA has two rules for the priority of configuration: - Configuration from higher priority sources overrides configuration from lower priority sources. - More specific configuration overrides less specific configuration. There is a situation where these two rules come into conflict: when a generic configuration is given in a config source of higher priority and a specific configuration is given in a config source of lower priority. In this situation it is not possible to know the end user's intention, and WA will error. This functionality allows for defaults for plugins, targets etc. to be configured at a global level and then seamlessly overridden without the need to remove the high level configuration. Dependent on specificity, configuration parameters from different sources will have different inherent priorities. Within an agenda, the configuration in "workload" entries will be more specific than "sections" entries, which in turn are more specific than parameters in the "config" entry.
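For example, within a single agenda the specificity rule plays out like this (a minimal sketch):

.. code-block:: yaml

    config:
        iterations: 3          # generic: the default for all specs
    workloads:
        - name: dhrystone
          iterations: 1        # specific: overrides the default for this spec only
        - memcpy               # uses the generic value and runs 3 times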
.. _config-include: Configuration Includes ---------------------- It is possible to include other files in your config files and agendas. This is done by specifying ``include#`` (note the trailing hash) as a key in one of the mappings, with the value being the path to the file to be included. The path must be either absolute, or relative to the location of the file it is being included from (*not* to the current working directory). The path may also include ``~`` to indicate the current user's home directory. The include is performed by removing the ``include#`` entry and loading the contents of the specified file into the mapping that contained it. In cases where the mapping already contains the key to be loaded, values will be merged using the usual merge method (for overwrites, values in the mapping take precedence over those from the included files). Below is an example of an agenda that includes other files. The assumption is that all of those files are in one directory. .. code-block:: yaml # agenda.yaml config: augmentations: [trace-cmd] include#: ./my-config.yaml sections: - include#: ./section1.yaml - include#: ./section2.yaml include#: ./workloads.yaml .. code-block:: yaml # my-config.yaml augmentations: [cpufreq] .. code-block:: yaml # section1.yaml runtime_parameters: frequency: max .. code-block:: yaml # section2.yaml runtime_parameters: frequency: min .. code-block:: yaml # workloads.yaml workloads: - dhrystone - memcpy The above is equivalent to having a single file like this: .. code-block:: yaml # agenda.yaml config: augmentations: [cpufreq, trace-cmd] sections: - runtime_parameters: frequency: max - runtime_parameters: frequency: min workloads: - dhrystone - memcpy Some additional details about the implementation and its limitations: - The ``include#`` *must* be a key in a mapping, and the contents of the included file *must* be a mapping as well; it is not possible to include a list (e.g. in the examples above the ``workloads:`` part *must* be in the included file). - Being a key in a mapping, there can only be one ``include#`` entry per block. - The included file *must* have a ``.yaml`` extension. - Nested inclusions *are* allowed. I.e. included files may themselves include files; in such cases the included paths must be relative to *that* file, and not the "main" file, as sketched below.
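A small sketch of the nested-include path rule (the file names are hypothetical):

.. code-block:: yaml

    # agenda.yaml
    config:
        include#: ./common/base.yaml

.. code-block:: yaml

    # common/base.yaml
    include#: ./extra.yaml    # resolved relative to common/, i.e. common/extra.yaml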
================================================ FILE: doc/source/user_information/user_reference/invocation.rst ================================================ .. _invocation: Commands ======== Installing the wa package will add the ``wa`` command to your system, which you can run from anywhere. This has a number of sub-commands, which can be viewed by executing :: wa -h Individual sub-commands are discussed in detail below. .. _run-command: Run --- The most common sub-command you will use is ``run``. This will run the specified workload(s) and process the resulting output. This takes a single mandatory argument which specifies what you want WA to run. This could be either a workload name, or a path to an "agenda" file that allows you to specify multiple workloads as well as a lot of additional configuration (see the :ref:`agenda` section for details). Executing :: wa run -h Will display help for this subcommand that will look something like this: .. code-block:: none usage: wa run [-h] [-c CONFIG] [-v] [--version] [-d DIR] [-f] [-i ID] [--disable INSTRUMENT] AGENDA Execute automated workloads on a remote device and process the resulting output. positional arguments: AGENDA Agenda for this workload automation run. This defines which workloads will be executed, how many times, with which tunables, etc. See example agendas in /usr/local/lib/python3.X/dist-packages/wa for an example of how this file should be structured. optional arguments: -h, --help show this help message and exit -c CONFIG, --config CONFIG specify an additional config.yaml -v, --verbose The scripts will produce verbose output. --version show program's version number and exit -d DIR, --output-directory DIR Specify a directory where the output will be generated. If the directory already exists, the script will abort unless -f option (see below) is used, in which case the contents of the directory will be overwritten. If this option is not specified, then wa_output will be used instead. -f, --force Overwrite output directory if it exists. By default, the script will abort in this situation to prevent accidental data loss. -i ID, --id ID Specify a workload spec ID from an agenda to run. If this is specified, only that particular spec will be run, and other workloads in the agenda will be ignored. This option may be used to specify multiple IDs. --disable INSTRUMENT Specify an instrument or output processor to disable from the command line. This is equivalent to adding "~{metavar}" to the instruments list in the agenda. This can be used to temporarily disable a troublesome instrument for a particular run without introducing permanent change to the config (which one might then forget to revert). This option may be specified multiple times. .. _list-command: List ---- This lists all plugins of a particular type. For example :: wa list instruments will list all instruments currently included in WA. The list will consist of plugin names and short descriptions of the functionality they offer e.g. .. code-block:: none #.. cpufreq: Collects dynamic frequency (DVFS) settings before and after workload execution. dmesg: Collected dmesg output before and during the run. energy_measurement: This instrument is designed to be used as an interface to the various energy measurement instruments located in devlib. execution_time: Measure how long it took to execute the run() methods of a Workload. file_poller: Polls the given files at a set sample interval. The values are output in CSV format. fps: Measures Frames Per Second (FPS) and associated metrics for a workload. #.. You can use the same syntax to quickly display information about ``commands``, ``energy_instrument_backends``, ``instruments``, ``output_processors``, ``resource_getters``, ``targets`` and ``workloads``. .. _show-command: Show ---- This will show detailed information about a plugin (workloads, targets, instruments etc.), including a full description and any relevant parameters/configuration that are available. For example executing :: wa show benchmarkpi will produce something like: :: benchmarkpi ----------- Measures the time the target device takes to run and complete the Pi calculation algorithm. http://androidbenchmark.com/howitworks.php from the website: The whole idea behind this application is to use the same Pi calculation algorithm on every Android Device and check how fast that process is. Better calculation times, conclude to faster Android devices. This way you can also check how lightweight your custom made Android build is. Or not. As Pi is an irrational number, Benchmark Pi does not calculate the actual Pi number, but an approximation near the first digits of Pi over the same calculation circles the algorithms needs. So, the number you are getting in milliseconds is the time your mobile device takes to run and complete the Pi calculation algorithm resulting in a approximation of the first Pi digits. parameters ~~~~~~~~~~ cleanup_assets : boolean If ``True``, if assets are deployed as part of the workload they will be removed again from the device as part of finalize. default: ``True`` package_name : str The package name that can be used to specify the workload apk to use. install_timeout : integer Timeout for the installation of the apk. constraint: ``value > 0`` default: ``300`` version : str The version of the package to be used. variant : str The variant of the package to be used. strict : boolean Whether to throw an error if the specified package cannot be found on host. force_install : boolean Always re-install the APK, even if matching version is found already installed on the device. uninstall : boolean If ``True``, will uninstall workload's APK as part of teardown.
exact_abi : boolean If ``True``, workload will check that the APK matches the target device ABI, otherwise any suitable APK found will be used. markers_enabled : boolean If set to ``True``, workloads will insert markers into logs at various points during execution. These markers may be used by other plugins or post-processing scripts to provide measurements or statistics for specific parts of the workload execution. .. note:: You can also use this command to view global settings by using ``wa show settings`` .. _create-command: Create ------ This aids in the creation of new WA-related objects, for example agendas and workloads. For more detailed information on creating workloads please see the :ref:`adding a workload ` section. As an example, to create an agenda that will run the dhrystone and memcpy workloads, use the status and hwmon augmentations, run each test 3 times and save the result into the file ``my_agenda.yaml``, the following command can be used:: wa create agenda dhrystone memcpy status hwmon -i 3 -o my_agenda.yaml Which will produce something like:: config: augmentations: - status - hwmon status: {} hwmon: {} iterations: 3 workloads: - name: dhrystone params: cleanup_assets: true delay: 0 duration: 0 mloops: 0 taskset_mask: 0 threads: 4 - name: memcpy params: buffer_size: 5242880 cleanup_assets: true cpus: null iterations: 1000 This will be populated with default values which can then be customised for the particular use case. Additionally the create command can be used to initialize (and update) a Postgres database which can be used by the ``postgres`` output processor. Most of the database connection parameters have default values; however, they can be overridden via command line arguments. When initializing the database WA will also save the supplied parameters into the default user config file so that they do not need to be specified each time the output processor is used. As an example, if we had a database server running at 10.0.0.2 using the standard port we could use the following command to initialize a database for use with WA:: wa create database -a 10.0.0.2 -u my_username -p Pa55w0rd This will log into the database server with the supplied credentials and create a database (defaulting to 'wa') and will save the configuration to the ``~/.workload_automation/config.yaml`` file. With updates to WA there may be changes to the database schema used. In this case the create command can also be used with the ``-U`` flag to update the database to use the new schema as follows:: wa create database -a 10.0.0.2 -u my_username -p Pa55w0rd -U This will upgrade the database sequentially until the database schema is using the latest version. .. _process-command: Process -------- This command allows for output processors to be run on data that was produced by a previous run. There are 2 ways of specifying which processors you wish to use: either passing them directly as arguments to the process command with the ``--processor`` argument, or providing an additional config file with the ``--config`` argument. Please note that by default the process command will not rerun processors that have already been run during the run; in order to force a rerun of the processors you can specify the ``--force`` argument. Additionally, if you have a directory containing multiple run directories you can specify the ``--recursive`` argument, which will cause WA to walk the specified directory, processing all the WA output sub-directories individually. As an example, say we had performed multiple experiments and have the various WA output directories in our ``my_experiments`` directory, and we now want to process the outputs with a tool that only supports CSV files. We can easily generate CSV files for all the runs contained in our directory using the ``csv`` processor with the following command:: wa process -r -p csv my_experiments
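If using the ``--config`` route instead, the config file takes the same general form as any other WA config file; a minimal sketch (the file name is hypothetical):

.. code-block:: yaml

    # process-config.yaml
    augmentations:
        - csv
    csv:
        use_all_classifiers: true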
.. _record_command: Record ------ This command simplifies the process of recording revent files. It will automatically deploy revent and has options to automatically open apps and record specified stages of a workload. Revent allows you to record raw inputs such as screen swipes or button presses. This can be useful for recording inputs for workloads such as games that don't have XML UI layouts that can be used with UIAutomator. As a drawback, revent recordings are specific to the type of device they were recorded on. The names of revent recordings are composed of two parts, in the format ``{device_name}.{suffix}.revent``. - device_name can either be specified manually with the ``-d`` argument or it can be automatically determined. On Android devices it will be obtained from ``build.prop``; on Linux devices it is obtained from ``/proc/device-tree/model``. - suffix is used by WA to determine which part of the app execution the recording is for; currently this is one of ``setup``, ``run``, ``extract_results`` or ``teardown``. All stages except ``run`` are optional for playback; to specify which stages should be recorded, use the ``-s``, ``-r``, ``-e`` or ``-t`` arguments respectively, or optionally ``-a`` to indicate that all stages should be recorded. The full set of options for this command are:: usage: wa record [-h] [-c CONFIG] [-v] [--version] [-d DEVICE] [-o FILE] [-s] [-r] [-e] [-t] [-a] [-C] [-p PACKAGE | -w WORKLOAD] optional arguments: -h, --help show this help message and exit -c CONFIG, --config CONFIG specify an additional config.yaml -v, --verbose The scripts will produce verbose output. --version show program's version number and exit -d DEVICE, --device DEVICE Specify the device on which to run. This will take precedence over the device (if any) specified in configuration. -o FILE, --output FILE Specify the output file -s, --setup Record a recording for setup stage -r, --run Record a recording for run stage -e, --extract_results Record a recording for extract_results stage -t, --teardown Record a recording for teardown stage -a, --all Record recordings for available stages -C, --clear Clear app cache before launching it -p PACKAGE, --package PACKAGE Android package to launch before recording -w WORKLOAD, --workload WORKLOAD Name of a revent workload (mostly games) For more information please see :ref:`Revent Recording `. .. _replay-command: Replay ------ Alongside ``record``, WA also has a command to play back a single recorded revent file. It behaves similarly to the ``record`` command, taking a subset of the same options and allowing you to automatically launch a package on the device :: usage: wa replay [-h] [-c CONFIG] [-v] [--debug] [--version] [-p PACKAGE] [-C] revent positional arguments: revent The name of the file to replay optional arguments: -h, --help show this help message and exit -c CONFIG, --config CONFIG specify an additional config.py -v, --verbose The scripts will produce verbose output. --debug Enable debug mode. Note: this implies --verbose.
--version show program's version number and exit -p PACKAGE, --package PACKAGE Package to launch before recording -C, --clear Clear app cache before launching it For more information please see :ref:`Revent Replaying `. ================================================ FILE: doc/source/user_information/user_reference/output_directory.rst ================================================ .. _output_directory_structure: Output Directory Structure ========================== This is an overview of the WA output directory structure. .. note:: In addition to the files and subdirectories described here, other content may be present in the output directory for a run, depending on the enabled augmentations. Overview -------- The output directory will contain a subdirectory for every job iteration that was run, as well as some additional entries. The following diagram illustrates the typical structure of a WA output directory:: wa_output/ ├── __meta/ │ ├── config.json │ ├── jobs.json │ ├── raw_config │ │   ├── cfg0-config.yaml │ │   └── agenda.yaml │ ├── run_info.json │ └── target_info.json ├── __failed/ │ └── wk1-dhrystone-1-attempt1 ├── wk1-dhrystone-1/ │ └── result.json ├── wk1-dhrystone-2/ │ └── result.json ├── wk2-memcpy-1/ │ └── result.json ├── wk2-memcpy-2/ │ └── result.json ├── result.json └── run.log This is the directory structure that would be generated after running two iterations each of the ``dhrystone`` and ``memcpy`` workloads with no augmentations enabled, and with the first attempt at the first iteration of dhrystone having failed. You may notice that a number of directories named ``wk*-x-x`` were generated in the output directory structure. Each of these directories represents a :term:`job`. The name of the output directory is as stated :ref:`here <job_execution_subd>`. Output Directory Entries ------------------------ result.json Contains a JSON structure describing the result of the execution, including collected metrics and artifacts. There will be one for each job execution, and one for the overall run. The run ``result.json`` will only contain metrics/artifacts for the run as a whole, and will not contain results for individual jobs. You typically would not access ``result.json`` files directly. Instead you would either enable augmentations to format the results in an easier-to-manage form (such as a CSV table), or use the :ref:`output_processing_api` to access the results from scripts. run.log This is a log of everything that happened during the run, including all interactions with the target, and all the decisions made by the framework. The output is equivalent to what you would see on the console when running with the ``--verbose`` option. .. note:: WA source contains a syntax file for Vim that will color the initial part of each log line, in a similar way to what you see on the console. This may be useful for quickly spotting error and warning messages when scrolling through the log. https://github.com/ARM-software/workload-automation/blob/next/extras/walog.vim __meta This directory contains configuration and run metadata. See :ref:`config_and_meta` below for details. __failed This directory will only be present if one or more job executions have failed and were re-run. This directory contains output directories for the failed attempts. .. _job_execution_subd: job execution output subdirectory Each subdirectory will be named ``<id>_<label>_<iteration>``, and will, at minimum, contain a ``result.json`` (see above). Additionally, it may contain raw output from the workload, and any additional artifacts (e.g. traces) generated by augmentations.
Finally, if workload execution has failed, WA may gather some additional logging (such as the UI state at the time of failure) and place it here. .. _config_and_meta: Configuration and Metadata -------------------------- As stated above, the ``__meta`` directory contains run configuration and metadata. Typically, you would not access these files directly, but would use the :ref:`output_processing_api` to query the metadata. For more details about WA configuration see :ref:`configuration-specification`. config.json Contains the overall run configuration, such as target interface configuration and job execution order, as well as various "meta-configuration" settings, such as default output path, verbosity level, and logging formatting. jobs.json Final configuration for all jobs, including enabled augmentations, workload and runtime parameters, etc. raw_config This directory contains copies of the config file(s) and the agenda that were parsed in order to generate configuration for this run. Each config file is prefixed with ``cfg<n>-``, where ``<n>`` is the number indicating the order (with respect to the other config files) in which it was parsed, e.g. ``cfg0-config.yaml`` is always a copy of ``$WA_USER_DIRECTORY/config.yaml``. The one file without a prefix is the agenda. run_info.json Run metadata, e.g. start/end timestamps and duration. target_info.json Extensive information about the target. This includes information about the target's CPUs configuration, kernel and userspace versions, etc. The exact content will vary depending on the target type (Android vs Linux) and what could be accessed on a particular device (e.g. if ``/proc/config.gz`` exists on the target, the kernel config will be included). ================================================ FILE: doc/source/user_information/user_reference/runtime_parameters.rst ================================================ .. _runtime-parameters: Runtime Parameters ------------------ .. contents:: Contents :local: Runtime parameters are options that can be specified to automatically configure the device at runtime. They can be specified at the global level in the agenda or for individual workloads. Example ^^^^^^^ Say we want to perform an experiment on an Android big.LITTLE device to compare the power consumption between the big and LITTLE clusters running the dhrystone and benchmarkpi workloads. Assuming we have additional instrumentation active for this device that can measure the power the device is consuming, to reduce external factors we want to ensure that airplane mode is turned on for all our tests and the screen is off only for our dhrystone run. We will then run 2 :ref:`sections ` which will each enable a single cluster on the device, set the cores to their maximum frequency and disable all available idle states. .. code-block:: yaml config: runtime_parameters: airplane_mode: true #.. workloads: - name: dhrystone iterations: 1 runtime_parameters: screen_on: false unlock_screen: 'vertical' - name: benchmarkpi iterations: 1 sections: - id: LITTLES runtime_parameters: num_little_cores: 4 little_governor: userspace little_frequency: max little_idle_states: none num_big_cores: 0 - id: BIGS runtime_parameters: num_big_cores: 4 big_governor: userspace big_frequency: max big_idle_states: none num_little_cores: 0 HotPlug ^^^^^^^ Parameters: :num_cores: An ``int`` that specifies the total number of cpu cores to be online.
:num_<core_name>_cores: An ``int`` that specifies the total number of cores of that particular type to be online; the target will be queried and, if the core names can be determined, a parameter for each of the unique core names will be available. :cpu<no>_online: A ``boolean`` that specifies whether that particular cpu, e.g. cpu0, will be online. If big.LITTLE is detected for the device, an additional 2 parameters are available: :num_big_cores: An ``int`` that specifies the total number of `big` cpu cores to be online. :num_little_cores: An ``int`` that specifies the total number of `little` cpu cores to be online. .. Note:: Please note that if the device in question is operating its own dynamic hotplugging then WA may be unable to set the CPU state, or its changes may be overridden. Unfortunately, the method of disabling dynamic hotplugging will vary from device to device.
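For instance, a section that brings up only two big cores could use a sketch like this (the values are illustrative):

.. code-block:: yaml

    sections:
        - id: two_big
          runtime_parameters:
              num_big_cores: 2
              num_little_cores: 0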
CPUFreq ^^^^^^^ :frequency: An ``int`` that can be used to specify a frequency for all cores if there are common frequencies available. .. Note:: When setting the frequency, if the governor is not set to userspace then WA will attempt to set the maximum and minimum frequencies to mimic the desired behaviour. :max_frequency: An ``int`` that can be used to specify a maximum frequency for all cores if there are common frequencies available. :min_frequency: An ``int`` that can be used to specify a minimum frequency for all cores if there are common frequencies available. :governor: A ``string`` that can be used to specify the governor for all cores if there are common governors available. :gov_tunables: A ``dict`` that can be used to specify governor tunables for all cores; unlike the other common parameters these are not validated at the beginning of the run, therefore incorrect values will cause an error during runtime. :<core_name>_frequency: An ``int`` that can be used to specify a frequency for cores of a particular type e.g. 'A72'. :<core_name>_max_frequency: An ``int`` that can be used to specify a maximum frequency for cores of a particular type e.g. 'A72'. :<core_name>_min_frequency: An ``int`` that can be used to specify a minimum frequency for cores of a particular type e.g. 'A72'. :<core_name>_governor: A ``string`` that can be used to specify the governor for cores of a particular type e.g. 'A72'. :<core_name>_gov_tunables: A ``dict`` that can be used to specify governor tunables for cores of a particular type e.g. 'A72'; these are not validated at the beginning of the run, therefore incorrect values will cause an error during runtime. :cpu<no>_frequency: An ``int`` that can be used to specify a frequency for a particular core e.g. 'cpu0'. :cpu<no>_max_frequency: An ``int`` that can be used to specify a maximum frequency for a particular core e.g. 'cpu0'. :cpu<no>_min_frequency: An ``int`` that can be used to specify a minimum frequency for a particular core e.g. 'cpu0'. :cpu<no>_governor: A ``string`` that can be used to specify the governor for a particular core e.g. 'cpu0'. :cpu<no>_gov_tunables: A ``dict`` that can be used to specify governor tunables for a particular core e.g. 'cpu0'; these are not validated at the beginning of the run, therefore incorrect values will cause an error during runtime. If big.LITTLE is detected for the device, an additional set of parameters is available: :big_frequency: An ``int`` that can be used to specify a frequency for the big cores. :big_max_frequency: An ``int`` that can be used to specify a maximum frequency for the big cores. :big_min_frequency: An ``int`` that can be used to specify a minimum frequency for the big cores. :big_governor: A ``string`` that can be used to specify the governor for the big cores. :big_gov_tunables: A ``dict`` that can be used to specify governor tunables for the big cores; these are not validated at the beginning of the run, therefore incorrect values will cause an error during runtime. :little_frequency: An ``int`` that can be used to specify a frequency for the little cores. :little_max_frequency: An ``int`` that can be used to specify a maximum frequency for the little cores. :little_min_frequency: An ``int`` that can be used to specify a minimum frequency for the little cores. :little_governor: A ``string`` that can be used to specify the governor for the little cores. :little_gov_tunables: A ``dict`` that can be used to specify governor tunables for the little cores; these are not validated at the beginning of the run, therefore incorrect values will cause an error during runtime. CPUIdle ^^^^^^^ :idle_states: A ``string`` or list of strings which can be used to specify which idle states should be enabled for all cores if there are common idle states available. 'all' and 'none' are also valid entries, as a shorthand. :<core_name>_idle_states: A ``string`` or list of strings which can be used to specify which idle states should be enabled for cores of a particular type e.g. 'A72'. 'all' and 'none' are also valid entries, as a shorthand. :cpu<no>_idle_states: A ``string`` or list of strings which can be used to specify which idle states should be enabled for a particular core e.g. 'cpu0'. 'all' and 'none' are also valid entries, as a shorthand. If big.LITTLE is detected for the device, an additional set of parameters is available: :big_idle_states: A ``string`` or list of strings which can be used to specify which idle states should be enabled for the big cores. 'all' and 'none' are also valid entries, as a shorthand. :little_idle_states: A ``string`` or list of strings which can be used to specify which idle states should be enabled for the little cores. 'all' and 'none' are also valid entries, as a shorthand. Android Specific Runtime Parameters ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :brightness: An ``int`` between 0 and 255 (inclusive) to specify the brightness the screen should be set to. Defaults to ``127``. :airplane_mode: A ``boolean`` to specify whether airplane mode should be enabled for the device. :rotation: A ``String`` to specify the screen orientation for the device. Valid entries are ``NATURAL``, ``LEFT``, ``INVERTED``, ``RIGHT``. :screen_on: A ``boolean`` to specify whether the device's screen should be turned on. Defaults to ``True``. :unlock_screen: A ``String`` to specify how the device's screen should be unlocked. Unlocking the screen is disabled by default. ``vertical``, ``diagonal`` and ``horizontal`` are the supported values (see :meth:`devlib.AndroidTarget.swipe_to_unlock`). Note that unlocking succeeds when no passcode is set. Since unlocking the screen requires turning on the screen, this option overrides the value of the ``screen_on`` option.
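Tying a few of these together, a workload entry that enables airplane mode and configures the screen might look like this (a minimal sketch; the values are illustrative):

.. code-block:: yaml

    workloads:
        - name: benchmarkpi
          runtime_parameters:
              airplane_mode: true
              brightness: 100
              rotation: NATURAL
              screen_on: true
              unlock_screen: vertical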
.. _setting-sysfiles: Setting Sysfiles ^^^^^^^^^^^^^^^^ In order to perform additional configuration of a target the ``sysfile_values`` runtime parameter can be used. The value for this parameter is a mapping (an associative array, in YAML) of file paths onto values that should be written into those files. ``sysfile_values`` is the only runtime parameter that is available for any (Linux) device. Other runtime parameters will depend on the specifics of the device used (e.g. its CPU cores configuration) as detailed above. .. note:: By default WA will attempt to verify that any sysfile values were written correctly by reading the node back and comparing the two values. If you do not wish this check to happen, for example if the node you are writing to is write-only, you can append an ``!`` to the file path to disable this verification. For example the following configuration could be used to enable and verify that cpu0 is online, however it will not attempt to check that its governor has been set to userspace:: - name: dhrystone runtime_params: sysfile_values: /sys/devices/system/cpu/cpu0/online: 1 /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor!: userspace ================================================ FILE: doc/source/user_information/user_reference.rst ================================================ .. _user_reference: *************** User Reference *************** .. contents:: Contents :depth: 2 :local: .. include:: user_information/user_reference/configuration.rst ------------------- .. include:: user_information/user_reference/invocation.rst ------------------- .. include:: user_information/user_reference/output_directory.rst ================================================ FILE: doc/source/user_information.rst ================================================ ================ User Information ================ .. contents:: Contents :depth: 4 :local: .. include:: user_information/installation.rst .. include:: user_information/user_guide.rst .. include:: user_information/how_to.rst .. include:: user_information/user_reference.rst ================================================ FILE: extras/Dockerfile ================================================ # This Dockerfile creates an image for use with Workload Automation # and/or devlib. # # To build this Docker image, please run the following command from # this directory: # # docker build -t wa . # # This will create an image called wa, which is preconfigured to # run WA and devlib. Please note that the build process automatically # accepts the licenses for the Android SDK, so please be sure that you # are willing to accept these prior to building and running the image # in a container. # # To run the container, please run the following command from the # directory you wish to work from: # # docker run -it --privileged -v /dev/bus/usb:/dev/bus/usb --volume ${PWD}:/workspace --workdir /workspace wa # # If using selinux you may need to add the `z` option when mounting # volumes e.g.: # --volume ${PWD}:/workspace:z # Warning: Please ensure you do not use this option when mounting # system directories. For more information please see: # https://docs.docker.com/storage/bind-mounts/#configure-the-selinux-label # # The above command starts the container in privileged mode, with # access to USB devices. The current directory is mounted into the # image, allowing you to work from there. Any files written to this # directory are directly written to the host.
Additional "volumes", # such as required assets, can be mounted into the container using a # second --volume command. # # If you require access to a TTY from the Docker container, please # also mount this into the container in the same style as is used to # mount USB devices. For example: # # docker run -it --privileged -v /dev/ttyUSB0:/dev/ttyUSB0 -v /dev/bus/usb:/dev/bus/usb --volume ${PWD}:/workspace --workdir /workspace wa # # When you are finished, please run `exit` to leave the container. # # The relevant environment variables are stored in a separate # file which is automatically sourced in an interactive shell. # If running from a non-interactive environment this can # be manually sourced with `source /home/wa/.wa_environment` # # NOTE: Please make sure that the ADB server is NOT running on the # host. If in doubt, run `adb kill-server` before running the docker # container. # # We want to make sure to base this on a recent ubuntu release FROM ubuntu:20.04 # Please update the references below to use different versions of # devlib, WA or the Android SDK ARG DEVLIB_REF=v1.3.4 ARG WA_REF=v3.3.1 ARG ANDROID_SDK_URL=https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip # Set a default timezone to use ENV TZ=Europe/London ARG DEBIAN_FRONTEND=noninteractive RUN apt-get update && apt-get install -y \ apache2-utils \ bison \ cmake \ curl \ emacs \ flex \ git \ libcdk5-dev \ libiio-dev \ libxml2 \ libxml2-dev \ locales \ nano \ openjdk-8-jre-headless \ python3 \ python3-pip \ ssh \ sshpass \ sudo \ trace-cmd \ usbutils \ vim \ wget \ zip # Clone and download iio-capture RUN git clone -v https://github.com/BayLibre/iio-capture.git /tmp/iio-capture && \ cd /tmp/iio-capture && \ make && \ make install RUN pip3 install pandas # Ensure we're using utf-8 as our default encoding RUN locale-gen en_US.UTF-8 ENV LANG en_US.UTF-8 ENV LANGUAGE en_US:en ENV LC_ALL en_US.UTF-8 # Let's get the two repos we need, and install them RUN git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && \ cd /tmp/devlib && \ git checkout $DEVLIB_REF && \ python3 setup.py install && \ pip3 install .[full] RUN git clone -v https://github.com/ARM-software/workload-automation.git /tmp/wa && \ cd /tmp/wa && \ git checkout $WA_REF && \ python3 setup.py install && \ pip3 install .[all] # Clean-up RUN rm -R /tmp/devlib /tmp/wa # Create and switch to the wa user RUN useradd -m -G plugdev,dialout wa USER wa # Let's set up the Android SDK for the user RUN mkdir -p /home/wa/.android RUN mkdir -p /home/wa/AndroidSDK && cd /home/wa/AndroidSDK && wget $ANDROID_SDK_URL -O sdk.zip && unzip sdk.zip RUN cd /home/wa/AndroidSDK/tools/bin && yes | ./sdkmanager --licenses && ./sdkmanager platform-tools && ./sdkmanager 'build-tools;27.0.3' # Download Monsoon RUN mkdir -p /home/wa/monsoon RUN curl https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py\?format\=TEXT | base64 --decode > /home/wa/monsoon/monsoon.py RUN chmod +x /home/wa/monsoon/monsoon.py # Update WA's required environment variables. RUN echo 'export PATH=/home/wa/monsoon:${PATH}' >> /home/wa/.wa_environment RUN echo 'export PATH=/home/wa/AndroidSDK/platform-tools:${PATH}' >> /home/wa/.wa_environment RUN echo 'export PATH=/home/wa/AndroidSDK/build-tools:${PATH}' >> /home/wa/.wa_environment RUN echo 'export ANDROID_HOME=/home/wa/AndroidSDK' >> /home/wa/.wa_environment # Source WA environment variables in an interactive environment RUN echo 'source /home/wa/.wa_environment' >> /home/wa/.bashrc # Generate some ADB keys. 
These will change each time the image is build but will otherwise persist. RUN /home/wa/AndroidSDK/platform-tools/adb keygen /home/wa/.android/adbkey # We need to make sure to add the remote assets too RUN wa --version && echo 'remote_assets_url: https://raw.githubusercontent.com/ARM-software/workload-automation-assets/master/dependencies' >> /home/wa/.workload_automation/config.yaml ================================================ FILE: extras/README ================================================ This directory is intended for miscellaneous extra stuff that may be useful while developing Workload Automation. It should *NOT* contain anything necessary for *using* workload automation. Whenever you add something to this directory, please also add a short description of what it is in this file. Dockerfile Docker file for generating a Docker image containing WA, devlib, and the required parts of the Android SDK. This can be run in a container to avoid configuring WA on the host. Should work "out of the box". pylintrc pylint configuration file set up for WA development (see comment at the top of the file for how to use). walog.vim Vim syntax file for WA logs; adds highlighting similar to what comes out in the console. See comment in the file for how to enable it. ================================================ FILE: extras/pylintrc ================================================ # # pylint configuration for Workload Automation. # # To install pylint run # # sudo apt-get install pylint # # copy this file to ~/.pylintrc in order for pylint to pick it up. # (Or alternatively, specify it with --rcfile option on invocation.) # # Note: If you're adding something to disable setting, please also add the # explanation of the code in the comment above it. Messages should only # be added here we really don't *ever* care about them. For ignoring # messages on specific lines or in specific files, add the appropriate # pylint disable clause in the source. # [MASTER] #profile=no ignore=external [MESSAGES CONTROL] # Disable the following messags: # C0301: Line too long (%s/%s) # C0103: Invalid name "%s" (should match %s) # C0111: Missing docstring # W0142 - Used * or ** magic # R0903: Too few public methods # R0904: Too many public methods # R0922: Abstract class is only referenced 1 times # W0511: TODO Note: this is disabled for a cleaner output, but should be reenabled # occasionally (through command line argument) to make sure all # TODO's are addressed, e.g. before a release. # W0141: Used builtin function (map|filter) # I0011: Locally disabling %s # R0921: %s: Abstract class not referenced # Note: this needs to be in the rc file due to a known bug in pylint: # http://www.logilab.org/ticket/111138 # W1401: nomalous-backslash-in-string, due to: # https://bitbucket.org/logilab/pylint/issue/272/anomalous-backslash-in-string-for-raw # C0330: bad continuation, due to: # https://bitbucket.org/logilab/pylint/issue/232/wrong-hanging-indentation-false-positive # TODO: disabling no-value-for-parameter and logging-format-interpolation, as they appear to be broken # in version 1.4.1 and return a lot of false postives; should be re-enabled once fixed. 
disable=C0301,C0103,C0111,W0142,R0903,R0904,R0922,W0511,W0141,I0011,R0921,W1401,C0330,no-value-for-parameter,logging-format-interpolation,no-else-return,inconsistent-return-statements,keyword-arg-before-vararg,consider-using-enumerate,no-member,super-with-arguments,useless-object-inheritance,raise-missing-from,no-else-raise,no-else-break,no-else-continue [FORMAT] max-module-lines=4000 [DESIGN] # We have DeviceConfig classes that are basically just repositories of confuration # settings. max-args=30 max-attributes=30 [SIMILARITIES] min-similarity-lines=10 [REPORTS] output-format=colorized reports=no [IMPORTS] # Parts of string are not deprecated. Throws too many false positives. deprecated-modules= ================================================ FILE: extras/walog.vim ================================================ " Copy this into ~/.vim/syntax/ and add the following to your ~/.vimrc: " au BufRead,BufNewFile run.log set filetype=walog " if exists("b:current_syntax") finish endif syn region debugPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d DEBUG' end=':' syn region infoPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d INFO' end=':' syn region warningPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d WARNING' end=':' syn region errorPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d ERROR' end=':' syn region critPreamble start='\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d CRITICAL' end=':' hi debugPreamble guifg=Blue ctermfg=DarkBlue hi infoPreamble guifg=Green ctermfg=DarkGreen hi warningPreamble guifg=Yellow ctermfg=178 hi errorPreamble guifg=Red ctermfg=DarkRed hi critPreamble guifg=Red ctermfg=DarkRed cterm=bold gui=bold let b:current_syntax='walog' ================================================ FILE: pytest.ini ================================================ [pytest] filterwarnings= ignore::DeprecationWarning:past[.*] ================================================ FILE: requirements.txt ================================================ bcrypt==4.0.1 certifi==2024.7.4 cffi==1.15.1 charset-normalizer==3.1.0 colorama==0.4.6 cryptography==44.0.1 devlib==1.3.4 future==0.18.3 idna==3.7 Louie-latest==1.3.1 lxml==4.9.2 nose==1.3.7 numpy==1.24.3 pandas==2.0.1 paramiko==3.4.0 pexpect==4.8.0 ptyprocess==0.7.0 pycparser==2.21 PyNaCl==1.5.0 pyserial==3.5 python-dateutil==2.8.2 pytz==2023.3 PyYAML==6.0 requests==2.32.4 scp==0.14.5 six==1.16.0 tzdata==2023.3 urllib3==2.5.0 wlauto==3.3.1 wrapt==1.15.0 ================================================ FILE: scripts/cpustates ================================================ #!/usr/bin/env python # Copyright 2015 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from wlauto.utils.power import main main() ================================================ FILE: scripts/wa ================================================ #!/usr/bin/env python # Copyright 2013-2015 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from wa.framework.entrypoint import main main() ================================================ FILE: setup.py ================================================ # Copyright 2013-2015 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import sys import warnings from itertools import chain try: from setuptools import setup from setuptools.command.sdist import sdist as orig_sdist except ImportError: from distutils.core import setup from distutils.command.sdist import sdist as orig_sdist wa_dir = os.path.join(os.path.dirname(__file__), 'wa') sys.path.insert(0, os.path.join(wa_dir, 'framework')) from version import (get_wa_version, get_wa_version_with_commit, format_version, required_devlib_version) # happens if falling back to distutils warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'") warnings.filterwarnings('ignore', "Unknown distribution option: 'extras_require'") try: os.remove('MANIFEST') except OSError: pass packages = [] data_files = {'': [os.path.join(wa_dir, 'commands', 'postgres_schema.sql')]} source_dir = os.path.dirname(__file__) for root, dirs, files in os.walk(wa_dir): rel_dir = os.path.relpath(root, source_dir) data = [] if '__init__.py' in files: for f in files: if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']: data.append(f) package_name = rel_dir.replace(os.sep, '.') package_dir = root packages.append(package_name) data_files[package_name] = data else: # use previous package name filepaths = [os.path.join(root, f) for f in files] data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths]) scripts = [os.path.join('scripts', s) for s in os.listdir('scripts')] with open("README.rst", "r") as fh: long_description = fh.read() devlib_version = format_version(required_devlib_version) params = dict( name='wlauto', description='A framework for automating workload execution and measurement collection on ARM devices.', long_description=long_description, version=get_wa_version_with_commit(), packages=packages, package_data=data_files, include_package_data=True, scripts=scripts, url='https://github.com/ARM-software/workload-automation', license='Apache v2', maintainer='ARM Architecture & Technology Device Lab', maintainer_email='workload-automation@arm.com', python_requires='>= 3.7', setup_requires=[ 'numpy<=1.16.4; python_version<"3"', 'numpy; python_version>="3"', ], install_requires=[ 'python-dateutil', # converting between UTC and local time. 
'pexpect>=3.3', # Send/receive to/from device 'pyserial', # Serial port interface 'colorama', # Printing with colors 'pyYAML>=5.1b3', # YAML-formatted agenda parsing 'requests', # Fetch assets over HTTP 'devlib>={}'.format(devlib_version), # Interacting with devices 'louie-latest', # callbacks dispatch 'wrapt', # better decorators 'pandas>=0.23.0,<=0.24.2; python_version<"3.5.3"', # Data analysis and manipulation 'pandas>=0.23.0; python_version>="3.5.3"', # Data analysis and manipulation 'future', # Python 2-3 compatibility ], dependency_links=['https://github.com/ARM-software/devlib/tarball/master#egg=devlib-{}'.format(devlib_version)], extras_require={ 'test': ['nose', 'mock'], 'notify': ['notify2'], 'doc': ['sphinx', 'sphinx_rtd_theme'], 'postgres': ['psycopg2-binary'], 'daq': ['daqpower'], }, # https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'License :: OSI Approved :: Apache Software License', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 3', ], ) all_extras = list(chain(iter(params['extras_require'].values()))) params['extras_require']['all'] = all_extras class sdist(orig_sdist): user_options = orig_sdist.user_options + [ ('strip-commit', 's', "Strip git commit hash from package version ") ] def initialize_options(self): orig_sdist.initialize_options(self) self.strip_commit = False def run(self): if self.strip_commit: self.distribution.get_version = get_wa_version orig_sdist.run(self) params['cmdclass'] = {'sdist': sdist} setup(**params) ================================================ FILE: tests/__init__.py ================================================ ================================================ FILE: tests/ci/idle_agenda.yaml ================================================ config: iterations: 1 augmentations: - ~~ - status device: generic_local device_config: big_core: null core_clusters: null core_names: null executables_directory: null keep_password: true load_default_modules: false model: null modules: null password: null shell_prompt: ! '40:^.*(shell|root|juno)@?.*:[/~]\S* *[#$] ' unrooted: True working_directory: null workloads: - name: idle params: duration: 1 ================================================ FILE: tests/data/bad-syntax-agenda.yaml ================================================ config: # tab on the following line reboot_policy: never workloads: - antutu ================================================ FILE: tests/data/extensions/devices/test_device.py ================================================ # Copyright 2013-2017 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
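The setup.py above builds its `packages` and `package_data` with a single os.walk over the wa/ tree: any directory containing `__init__.py` becomes a package whose non-Python files ride along as package data, and a directory without one is folded into the most recently seen package. A standalone sketch of that discovery logic (the `discover` helper name is mine; like the original, it relies on os.walk visiting parents before their children):

import os

def discover(root):
    """Collect packages and per-package data files under root."""
    parent = os.path.dirname(os.path.abspath(root))
    packages, package_data = [], {}
    package_name = package_dir = None
    for dirpath, _, files in os.walk(root):
        if '__init__.py' in files:
            rel = os.path.relpath(dirpath, parent)
            package_name = rel.replace(os.sep, '.')
            package_dir = dirpath
            packages.append(package_name)
            package_data[package_name] = [
                f for f in files
                if os.path.splitext(f)[1] not in ('.py', '.pyc', '.pyo')
            ]
        elif package_name is not None:
            # Data-only subdirectory: attach its files to the enclosing
            # package, with paths relative to that package's directory.
            package_data[package_name].extend(
                os.path.relpath(os.path.join(dirpath, f), package_dir)
                for f in files)
    return packages, package_data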
# from wa import Plugin class MockDevice(Plugin): name = 'test-device' kind = 'device' def __init__(self, *args, **kwargs): self.boot_called = 0 self.push_file_called = 0 self.pull_file_called = 0 self.execute_called = 0 self.set_sysfile_int_called = 0 self.close_called = 0 def boot(self): self.boot_called += 1 def push_file(self, source, dest): self.push_file_called += 1 def pull_file(self, source, dest): self.pull_file_called += 1 def execute(self, command): self.execute_called += 1 def set_sysfile_int(self, file, value): self.set_sysfile_int_called += 1 def close(self, command): self.close_called += 1 ================================================ FILE: tests/data/includes/agenda.yaml ================================================ config: augmentations: [~execution_time] include#: configs/test.yaml sections: - include#: sections/section1.yaml - include#: sections/section2.yaml include#: workloads.yaml ================================================ FILE: tests/data/includes/configs/test.yaml ================================================ augmentations: [cpufreq, trace-cmd] ================================================ FILE: tests/data/includes/section-include.yaml ================================================ classifiers: included: true ================================================ FILE: tests/data/includes/sections/section1.yaml ================================================ classifiers: {'section': 'one'} ================================================ FILE: tests/data/includes/sections/section2.yaml ================================================ classifiers: {'section': 'two'} include#: ../section-include.yaml ================================================ FILE: tests/data/includes/user/config.yaml ================================================ augmentations: [execution_time] ================================================ FILE: tests/data/includes/workloads.yaml ================================================ workloads: - dhrystone - name: memcpy classifiers: memcpy: True ================================================ FILE: tests/data/interrupts/after ================================================ CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7 65: 0 0 0 0 0 0 0 0 GIC dma-pl330.2 66: 0 0 0 0 0 0 0 0 GIC dma-pl330.0 67: 0 0 0 0 0 0 0 0 GIC dma-pl330.1 74: 0 0 0 0 0 0 0 0 GIC s3c2410-wdt 85: 2 0 0 0 0 0 0 0 GIC exynos4210-uart 89: 368 0 0 0 0 0 0 0 GIC s3c2440-i2c.1 90: 0 0 0 0 0 0 0 0 GIC s3c2440-i2c.2 92: 1294 0 0 0 0 0 0 0 GIC exynos5-hs-i2c.0 95: 831 0 0 0 0 0 0 0 GIC exynos5-hs-i2c.3 103: 1 0 0 0 0 0 0 0 GIC ehci_hcd:usb1, ohci_hcd:usb2 104: 7304 0 0 0 0 0 0 0 GIC xhci_hcd:usb3, exynos-ss-udc.0 105: 0 0 0 0 0 0 0 0 GIC xhci_hcd:usb5 106: 0 0 0 0 0 0 0 0 GIC mali.0 107: 16429 0 0 0 0 0 0 0 GIC dw-mci 108: 1 0 0 0 0 0 0 0 GIC dw-mci 109: 0 0 0 0 0 0 0 0 GIC dw-mci 114: 28074 0 0 0 0 0 0 0 GIC mipi-dsi 117: 0 0 0 0 0 0 0 0 GIC exynos-gsc 118: 0 0 0 0 0 0 0 0 GIC exynos-gsc 121: 0 0 0 0 0 0 0 0 GIC exynos5-jpeg-hx 123: 7 0 0 0 0 0 0 0 GIC s5p-fimg2d 126: 0 0 0 0 0 0 0 0 GIC s5p-mixer 127: 0 0 0 0 0 0 0 0 GIC hdmi-int 128: 0 0 0 0 0 0 0 0 GIC s5p-mfc-v6 142: 0 0 0 0 0 0 0 0 GIC dma-pl330.3 146: 0 0 0 0 0 0 0 0 GIC s5p-tvout-cec 149: 1035 0 0 0 0 0 0 0 GIC mali.0 152: 26439 0 0 0 0 0 0 0 GIC mct_tick0 153: 0 2891 0 0 0 0 0 0 GIC mct_tick1 154: 0 0 3969 0 0 0 0 0 GIC mct_tick2 155: 0 0 0 2385 0 0 0 0 GIC mct_tick3 160: 0 0 0 0 8038 0 0 0 GIC mct_tick4 161: 0 0 0 0 0 8474 0 0 GIC mct_tick5 162: 0 0 0 0 0 0 7842 0 GIC mct_tick6 163: 0 0 0 0 0 0 0 7827 GIC mct_tick7 
200: 0 0 0 0 0 0 0 0 GIC exynos5-jpeg-hx 201: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.29 218: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.25 220: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.27 224: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.19 251: 320 0 0 0 0 0 0 0 GIC mali.0 252: 0 0 0 0 0 0 0 0 GIC exynos5-scaler 253: 0 0 0 0 0 0 0 0 GIC exynos5-scaler 254: 0 0 0 0 0 0 0 0 GIC exynos5-scaler 272: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.5 274: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.6 280: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.11 282: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.30 284: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.12 286: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.17 288: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.4 290: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.20 294: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 296: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 298: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 300: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 302: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16 306: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.0 316: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.2 325: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.0 332: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16 340: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16 342: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 344: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16 405: 327 0 0 0 0 0 0 0 combiner s3c_fb 409: 0 0 0 0 0 0 0 0 combiner mcuctl 414: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.28 434: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.22 436: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.23 438: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.26 443: 12 0 0 0 0 0 0 0 combiner mct_comp_irq 446: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.21 449: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.13 453: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.15 474: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.24 512: 0 0 0 0 0 0 0 0 exynos-eint gpio-keys: KEY_POWER 518: 0 0 0 0 0 0 0 0 exynos-eint drd_switch_vbus 524: 0 0 0 0 0 0 0 0 exynos-eint gpio-keys: KEY_HOMEPAGE 526: 1 0 0 0 0 0 0 0 exynos-eint HOST_DETECT 527: 1 0 0 0 0 0 0 0 exynos-eint drd_switch_id 531: 1 0 0 0 0 0 0 0 exynos-eint drd_switch_vbus 532: 1 0 0 0 0 0 0 0 exynos-eint drd_switch_id 537: 3 0 0 0 0 0 0 0 exynos-eint mxt540e_ts 538: 0 0 0 0 0 0 0 0 exynos-eint sec-pmic-irq 543: 1 0 0 0 0 0 0 0 exynos-eint hdmi-ext 544: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_VOLUMEDOWN 545: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_VOLUMEUP 546: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_MENU 547: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_BACK 655: 0 0 0 0 0 0 0 0 sec-pmic rtc-alarm0 IPI0: 0 0 0 0 0 0 0 0 Timer broadcast interrupts IPI1: 8823 7185 4642 5652 2370 2069 1452 1351 Rescheduling interrupts IPI2: 4 7 8 6 8 7 8 8 Function call interrupts IPI3: 1 0 0 0 0 0 0 0 Single function call interrupts IPI4: 0 0 0 0 0 0 0 0 CPU stop interrupts IPI5: 0 0 0 0 0 0 0 0 CPU backtrace Err: 0 ================================================ FILE: tests/data/interrupts/before ================================================ CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7 65: 0 0 0 0 0 0 0 0 GIC dma-pl330.2 66: 0 0 0 0 0 0 0 0 GIC dma-pl330.0 67: 0 0 0 0 0 0 0 0 GIC dma-pl330.1 74: 0 0 0 0 0 0 0 0 GIC s3c2410-wdt 85: 2 0 0 0 0 0 0 0 GIC exynos4210-uart 89: 368 0 0 0 0 0 0 0 GIC s3c2440-i2c.1 90: 0 0 0 0 0 0 0 0 GIC s3c2440-i2c.2 92: 1204 0 0 0 0 0 0 0 GIC exynos5-hs-i2c.0 95: 831 0 0 0 0 0 0 0 GIC exynos5-hs-i2c.3 103: 1 0 0 0 0 0 0 0 GIC ehci_hcd:usb1, ohci_hcd:usb2 104: 7199 0 0 0 0 0 0 0 GIC xhci_hcd:usb3, exynos-ss-udc.0 105: 0 0 0 0 0 0 0 0 GIC xhci_hcd:usb5 106: 0 0 0 0 0 0 0 0 GIC mali.0 107: 16429 0 
0 0 0 0 0 0 GIC dw-mci 108: 1 0 0 0 0 0 0 0 GIC dw-mci 109: 0 0 0 0 0 0 0 0 GIC dw-mci 114: 26209 0 0 0 0 0 0 0 GIC mipi-dsi 117: 0 0 0 0 0 0 0 0 GIC exynos-gsc 118: 0 0 0 0 0 0 0 0 GIC exynos-gsc 121: 0 0 0 0 0 0 0 0 GIC exynos5-jpeg-hx 123: 7 0 0 0 0 0 0 0 GIC s5p-fimg2d 126: 0 0 0 0 0 0 0 0 GIC s5p-mixer 127: 0 0 0 0 0 0 0 0 GIC hdmi-int 128: 0 0 0 0 0 0 0 0 GIC s5p-mfc-v6 142: 0 0 0 0 0 0 0 0 GIC dma-pl330.3 146: 0 0 0 0 0 0 0 0 GIC s5p-tvout-cec 149: 1004 0 0 0 0 0 0 0 GIC mali.0 152: 26235 0 0 0 0 0 0 0 GIC mct_tick0 153: 0 2579 0 0 0 0 0 0 GIC mct_tick1 154: 0 0 3726 0 0 0 0 0 GIC mct_tick2 155: 0 0 0 2262 0 0 0 0 GIC mct_tick3 161: 0 0 0 0 0 2554 0 0 GIC mct_tick5 162: 0 0 0 0 0 0 1911 0 GIC mct_tick6 163: 0 0 0 0 0 0 0 1928 GIC mct_tick7 200: 0 0 0 0 0 0 0 0 GIC exynos5-jpeg-hx 201: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.29 218: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.25 220: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.27 224: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.19 251: 312 0 0 0 0 0 0 0 GIC mali.0 252: 0 0 0 0 0 0 0 0 GIC exynos5-scaler 253: 0 0 0 0 0 0 0 0 GIC exynos5-scaler 254: 0 0 0 0 0 0 0 0 GIC exynos5-scaler 272: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.5 274: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.6 280: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.11 282: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.30 284: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.12 286: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.17 288: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.4 290: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.20 294: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 296: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 298: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 300: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 302: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16 306: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.0 316: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.2 325: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.0 332: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16 340: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16 342: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 344: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16 405: 322 0 0 0 0 0 0 0 combiner s3c_fb 409: 0 0 0 0 0 0 0 0 combiner mcuctl 414: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.28 434: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.22 436: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.23 438: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.26 443: 12 0 0 0 0 0 0 0 combiner mct_comp_irq 446: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.21 449: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.13 453: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.15 474: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.24 512: 0 0 0 0 0 0 0 0 exynos-eint gpio-keys: KEY_POWER 518: 0 0 0 0 0 0 0 0 exynos-eint drd_switch_vbus 524: 0 0 0 0 0 0 0 0 exynos-eint gpio-keys: KEY_HOMEPAGE 526: 1 0 0 0 0 0 0 0 exynos-eint HOST_DETECT 527: 1 0 0 0 0 0 0 0 exynos-eint drd_switch_id 531: 1 0 0 0 0 0 0 0 exynos-eint drd_switch_vbus 532: 1 0 0 0 0 0 0 0 exynos-eint drd_switch_id 537: 3 0 0 0 0 0 0 0 exynos-eint mxt540e_ts 538: 0 0 0 0 0 0 0 0 exynos-eint sec-pmic-irq 543: 1 0 0 0 0 0 0 0 exynos-eint hdmi-ext 544: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_VOLUMEDOWN 545: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_VOLUMEUP 546: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_MENU 547: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_BACK 655: 0 0 0 0 0 0 0 0 sec-pmic rtc-alarm0 IPI0: 0 0 0 0 0 0 0 0 Timer broadcast interrupts IPI1: 8751 7147 4615 5623 2334 2066 1449 1348 Rescheduling interrupts IPI2: 3 6 7 6 7 6 7 7 Function call interrupts IPI3: 1 0 0 0 0 0 0 0 Single function call interrupts IPI4: 0 0 0 0 0 0 0 0 CPU stop interrupts 
IPI5: 0 0 0 0 0 0 0 0 CPU backtrace Err: 0 ================================================ FILE: tests/data/interrupts/result ================================================ CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7 65: 0 0 0 0 0 0 0 0 GIC dma-pl330.2 66: 0 0 0 0 0 0 0 0 GIC dma-pl330.0 67: 0 0 0 0 0 0 0 0 GIC dma-pl330.1 74: 0 0 0 0 0 0 0 0 GIC s3c2410-wdt 85: 0 0 0 0 0 0 0 0 GIC exynos4210-uart 89: 0 0 0 0 0 0 0 0 GIC s3c2440-i2c.1 90: 0 0 0 0 0 0 0 0 GIC s3c2440-i2c.2 92: 90 0 0 0 0 0 0 0 GIC exynos5-hs-i2c.0 95: 0 0 0 0 0 0 0 0 GIC exynos5-hs-i2c.3 103: 0 0 0 0 0 0 0 0 GIC ehci_hcd:usb1, ohci_hcd:usb2 104: 105 0 0 0 0 0 0 0 GIC xhci_hcd:usb3, exynos-ss-udc.0 105: 0 0 0 0 0 0 0 0 GIC xhci_hcd:usb5 106: 0 0 0 0 0 0 0 0 GIC mali.0 107: 0 0 0 0 0 0 0 0 GIC dw-mci 108: 0 0 0 0 0 0 0 0 GIC dw-mci 109: 0 0 0 0 0 0 0 0 GIC dw-mci 114: 1865 0 0 0 0 0 0 0 GIC mipi-dsi 117: 0 0 0 0 0 0 0 0 GIC exynos-gsc 118: 0 0 0 0 0 0 0 0 GIC exynos-gsc 121: 0 0 0 0 0 0 0 0 GIC exynos5-jpeg-hx 123: 0 0 0 0 0 0 0 0 GIC s5p-fimg2d 126: 0 0 0 0 0 0 0 0 GIC s5p-mixer 127: 0 0 0 0 0 0 0 0 GIC hdmi-int 128: 0 0 0 0 0 0 0 0 GIC s5p-mfc-v6 142: 0 0 0 0 0 0 0 0 GIC dma-pl330.3 146: 0 0 0 0 0 0 0 0 GIC s5p-tvout-cec 149: 31 0 0 0 0 0 0 0 GIC mali.0 152: 204 0 0 0 0 0 0 0 GIC mct_tick0 153: 0 312 0 0 0 0 0 0 GIC mct_tick1 154: 0 0 243 0 0 0 0 0 GIC mct_tick2 155: 0 0 0 123 0 0 0 0 GIC mct_tick3 > 160: 0 0 0 0 8038 0 0 0 GIC mct_tick4 161: 0 0 0 0 0 5920 0 0 GIC mct_tick5 162: 0 0 0 0 0 0 5931 0 GIC mct_tick6 163: 0 0 0 0 0 0 0 5899 GIC mct_tick7 200: 0 0 0 0 0 0 0 0 GIC exynos5-jpeg-hx 201: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.29 218: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.25 220: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.27 224: 0 0 0 0 0 0 0 0 GIC exynos-sysmmu.19 251: 8 0 0 0 0 0 0 0 GIC mali.0 252: 0 0 0 0 0 0 0 0 GIC exynos5-scaler 253: 0 0 0 0 0 0 0 0 GIC exynos5-scaler 254: 0 0 0 0 0 0 0 0 GIC exynos5-scaler 272: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.5 274: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.6 280: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.11 282: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.30 284: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.12 286: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.17 288: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.4 290: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.20 294: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 296: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 298: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 300: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 302: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16 306: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.0 316: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.2 325: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.0 332: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16 340: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16 342: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.9 344: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.16 405: 5 0 0 0 0 0 0 0 combiner s3c_fb 409: 0 0 0 0 0 0 0 0 combiner mcuctl 414: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.28 434: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.22 436: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.23 438: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.26 443: 0 0 0 0 0 0 0 0 combiner mct_comp_irq 446: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.21 449: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.13 453: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.15 474: 0 0 0 0 0 0 0 0 combiner exynos-sysmmu.24 512: 0 0 0 0 0 0 0 0 exynos-eint gpio-keys: KEY_POWER 518: 0 0 0 0 0 0 0 0 exynos-eint drd_switch_vbus 524: 0 0 0 0 0 0 0 0 exynos-eint gpio-keys: KEY_HOMEPAGE 526: 0 0 0 0 0 0 0 0 exynos-eint HOST_DETECT 527: 0 0 0 0 0 0 0 0 
exynos-eint drd_switch_id 531: 0 0 0 0 0 0 0 0 exynos-eint drd_switch_vbus 532: 0 0 0 0 0 0 0 0 exynos-eint drd_switch_id 537: 0 0 0 0 0 0 0 0 exynos-eint mxt540e_ts 538: 0 0 0 0 0 0 0 0 exynos-eint sec-pmic-irq 543: 0 0 0 0 0 0 0 0 exynos-eint hdmi-ext 544: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_VOLUMEDOWN 545: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_VOLUMEUP 546: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_MENU 547: 0 0 0 0 0 0 0 0 s5p_gpioint gpio-keys: KEY_BACK 655: 0 0 0 0 0 0 0 0 sec-pmic rtc-alarm0 IPI0: 0 0 0 0 0 0 0 0 Timer broadcast interrupts IPI1: 72 38 27 29 36 3 3 3 Rescheduling interrupts IPI2: 1 1 1 0 1 1 1 1 Function call interrupts IPI3: 0 0 0 0 0 0 0 0 Single function call interrupts IPI4: 0 0 0 0 0 0 0 0 CPU stop interrupts IPI5: 0 0 0 0 0 0 0 0 CPU backtrace Err: 0 ================================================ FILE: tests/data/logcat.2.log ================================================ --------- beginning of /dev/log/main D/TextView( 2468): 7:07 D/TextView( 2468): 7:07 D/TextView( 2468): Thu, June 27 --------- beginning of /dev/log/system D/TextView( 3099): CaffeineMark results D/TextView( 3099): Overall score: D/TextView( 3099): Rating D/TextView( 3099): Rank D/TextView( 3099): 0 D/TextView( 3099): Details D/TextView( 3099): Publish D/TextView( 3099): Top 10 D/TextView( 3099): 3672 ================================================ FILE: tests/data/logcat.log ================================================ --------- beginning of /dev/log/main --------- beginning of /dev/log/system D/TextView( 2462): 5:05 D/TextView( 2462): 5:05 D/TextView( 2462): Mon, June 24 D/TextView( 3072): Stop Test D/TextView( 3072): Testing CPU and memory… D/TextView( 3072): 0% D/TextView( 3072): Testing CPU and memory… ================================================ FILE: tests/data/test-agenda.yaml ================================================ global: iterations: 8 boot_parameters: os_mode: mp_a15_bootcluster runtime_parameters: a7_governor: Interactive a15_governor: Interactive2 a7_cores: 3 a15_cores: 2 workloads: - id: 1c workload_name: exoplayer - id: 1d workload_name: exoplayer runtime_parameters: os_mode: mp_a7_only a7_cores: 0 iterations: 4 - id: 1e workload_name: benchmarkpi - id: 1f workload_name: antutu runtime_parameters: a7_cores: 1 a15_cores: 1 ================================================ FILE: tests/data/test-config.py ================================================ # Copyright 2013-2017 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # device = 'TEST' ================================================ FILE: tests/test_agenda_parser.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2. 
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=E0611 # pylint: disable=R0201 import os import sys from collections import defaultdict from unittest import TestCase from nose.tools import assert_equal, assert_in, raises, assert_true DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') os.environ['WA_USER_DIRECTORY'] = os.path.join(DATA_DIR, 'includes') from wa.framework.configuration.execution import ConfigManager from wa.framework.configuration.parsers import AgendaParser from wa.framework.exception import ConfigError from wa.utils.serializer import yaml from wa.utils.types import reset_all_counters YAML_TEST_FILE = os.path.join(DATA_DIR, 'test-agenda.yaml') YAML_BAD_SYNTAX_FILE = os.path.join(DATA_DIR, 'bad-syntax-agenda.yaml') INCLUDES_TEST_FILE = os.path.join(DATA_DIR, 'includes', 'agenda.yaml') invalid_agenda_text = """ workloads: - id: 1 workload_parameters: test: 1 """ duplicate_agenda_text = """ global: iterations: 1 workloads: - id: 1 workload_name: antutu workload_parameters: test: 1 - id: "1" workload_name: benchmarkpi """ short_agenda_text = """ workloads: [antutu, dhrystone, benchmarkpi] """ default_ids_agenda_text = """ workloads: - antutu - id: wk1 name: benchmarkpi - id: test name: dhrystone params: cpus: 1 - vellamo """ sectioned_agenda_text = """ sections: - id: sec1 runtime_params: dp: one workloads: - name: antutu workload_parameters: markers_enabled: True - benchmarkpi - name: dhrystone runtime_params: dp: two - id: sec2 runtime_params: dp: three workloads: - antutu workloads: - memcpy """ dup_sectioned_agenda_text = """ sections: - id: sec1 workloads: - antutu - id: sec1 workloads: - benchmarkpi workloads: - memcpy """ yaml_anchors_agenda_text = """ workloads: - name: dhrystone params: &dhrystone_single_params cleanup_assets: true cpus: 0 delay: 3 duration: 0 mloops: 10 threads: 1 - name: dhrystone params: <<: *dhrystone_single_params threads: 4 """ class AgendaTest(TestCase): def setUp(self): reset_all_counters() self.config = ConfigManager() self.parser = AgendaParser() def test_yaml_load(self): self.parser.load_from_path(self.config, YAML_TEST_FILE) assert_equal(len(self.config.jobs_config.root_node.workload_entries), 4) def test_duplicate_id(self): duplicate_agenda = yaml.load(duplicate_agenda_text) try: self.parser.load(self.config, duplicate_agenda, 'test') except ConfigError as e: assert_in('duplicate', e.message.lower()) # pylint: disable=E1101 else: raise Exception('ConfigError was not raised for an agenda with duplicate ids.') def test_yaml_missing_field(self): invalid_agenda = yaml.load(invalid_agenda_text) try: self.parser.load(self.config, invalid_agenda, 'test') except ConfigError as e: assert_in('workload name', e.message) else: raise Exception('ConfigError was not raised for an invalid agenda.') def test_defaults(self): short_agenda = yaml.load(short_agenda_text) self.parser.load(self.config, short_agenda, 'test') workload_entries = self.config.jobs_config.root_node.workload_entries assert_equal(len(workload_entries), 3) assert_equal(workload_entries[0].config['workload_name'], 'antutu') assert_equal(workload_entries[0].id, 'wk1') def test_default_id_assignment(self): default_ids_agenda = yaml.load(default_ids_agenda_text) self.parser.load(self.config, 
default_ids_agenda, 'test2') workload_entries = self.config.jobs_config.root_node.workload_entries assert_equal(workload_entries[0].id, 'wk2') assert_equal(workload_entries[3].id, 'wk3') def test_sections(self): sectioned_agenda = yaml.load(sectioned_agenda_text) self.parser.load(self.config, sectioned_agenda, 'test') root_node_workload_entries = self.config.jobs_config.root_node.workload_entries leaves = list(self.config.jobs_config.root_node.leaves()) section1_workload_entries = leaves[0].workload_entries section2_workload_entries = leaves[0].workload_entries assert_equal(root_node_workload_entries[0].config['workload_name'], 'memcpy') assert_true(section1_workload_entries[0].config['workload_parameters']['markers_enabled']) assert_equal(section2_workload_entries[0].config['workload_name'], 'antutu') def test_yaml_anchors(self): yaml_anchors_agenda = yaml.load(yaml_anchors_agenda_text) self.parser.load(self.config, yaml_anchors_agenda, 'test') workload_entries = self.config.jobs_config.root_node.workload_entries assert_equal(len(workload_entries), 2) assert_equal(workload_entries[0].config['workload_name'], 'dhrystone') assert_equal(workload_entries[0].config['workload_parameters']['threads'], 1) assert_equal(workload_entries[0].config['workload_parameters']['delay'], 3) assert_equal(workload_entries[1].config['workload_name'], 'dhrystone') assert_equal(workload_entries[1].config['workload_parameters']['threads'], 4) assert_equal(workload_entries[1].config['workload_parameters']['delay'], 3) @raises(ConfigError) def test_dup_sections(self): dup_sectioned_agenda = yaml.load(dup_sectioned_agenda_text) self.parser.load(self.config, dup_sectioned_agenda, 'test') @raises(ConfigError) def test_bad_syntax(self): self.parser.load_from_path(self.config, YAML_BAD_SYNTAX_FILE) class FakeTargetManager: def merge_runtime_parameters(self, params): return params def validate_runtime_parameters(self, params): pass class IncludesTest(TestCase): def test_includes(self): from pprint import pprint parser = AgendaParser() cm = ConfigManager() tm = FakeTargetManager() includes = parser.load_from_path(cm, INCLUDES_TEST_FILE) include_set = set([os.path.basename(i) for i in includes]) assert_equal(include_set, set(['test.yaml', 'section1.yaml', 'section2.yaml', 'section-include.yaml', 'workloads.yaml'])) job_classifiers = {j.id: j.classifiers for j in cm.jobs_config.generate_job_specs(tm)} assert_equal(job_classifiers, { 's1-wk1': {'section': 'one'}, 's2-wk1': {'section': 'two', 'included': True}, 's1-wk2': {'section': 'one', 'memcpy': True}, 's2-wk2': {'section': 'two', 'included': True, 'memcpy': True}, }) ================================================ FILE: tests/test_config.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
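The test_yaml_anchors case above exercises standard YAML anchor and merge-key behaviour, which plain PyYAML reproduces; a self-contained sketch using an abbreviated version of the same agenda (PyYAML here is an illustrative stand-in for wa.utils.serializer.yaml):

import yaml

AGENDA = """
workloads:
    - name: dhrystone
      params: &dhrystone_single_params
          threads: 1
          delay: 3
    - name: dhrystone
      params:
          <<: *dhrystone_single_params
          threads: 4
"""

spec = yaml.safe_load(AGENDA)
first, second = (entry['params'] for entry in spec['workloads'])
assert first == {'threads': 1, 'delay': 3}
assert second == {'threads': 4, 'delay': 3}  # merge key, then per-entry override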
# import unittest from nose.tools import assert_equal from wa.framework.configuration.execution import ConfigManager from wa.utils.misc import merge_config_values class TestConfigUtils(unittest.TestCase): def test_merge_values(self): test_cases = [ # base, other, expected_result ('a', 3, 3), ('a', [1, 2], ['a', 1, 2]), ({1: 2}, [3, 4], [{1: 2}, 3, 4]), (set([2]), [1, 2, 3], [2, 1, 3]), ([1, 2, 3], set([2]), set([1, 2, 3])), ([1, 2], None, [1, 2]), (None, 'a', 'a'), ] for v1, v2, expected in test_cases: result = merge_config_values(v1, v2) assert_equal(result, expected) if v2 is not None: assert_equal(type(result), type(v2)) class TestConfigParser(unittest.TestCase): def test_param_merge(self): config = ConfigManager() config.load_config({'workload_params': {'one': 1, 'three': {'ex': 'x'}}, 'runtime_params': {'aye': 'a'}}, 'file_one') config.load_config({'workload_params': {'two': 2, 'three': {'why': 'y'}}, 'runtime_params': {'bee': 'b'}}, 'file_two') assert_equal( config.jobs_config.job_spec_template['workload_parameters'], {'one': 1, 'two': 2, 'three': {'why': 'y'}}, ) assert_equal( config.jobs_config.job_spec_template['runtime_parameters'], {'aye': 'a', 'bee': 'b'}, ) ================================================ FILE: tests/test_diff.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=E0611 # pylint: disable=R0201 import os import tempfile from unittest import TestCase from nose.tools import assert_equal from wa.utils.diff import diff_interrupt_files class InterruptDiffTest(TestCase): def test_interrupt_diff(self): file_dir = os.path.join(os.path.dirname(__file__), 'data', 'interrupts') before_file = os.path.join(file_dir, 'before') after_file = os.path.join(file_dir, 'after') expected_result_file = os.path.join(file_dir, 'result') output_file = tempfile.mktemp() diff_interrupt_files(before_file, after_file, output_file) with open(output_file) as fh: output_diff = fh.read() with open(expected_result_file) as fh: expected_diff = fh.read() assert_equal(output_diff, expected_diff) ================================================ FILE: tests/test_exec_control.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
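diff_interrupt_files, exercised above against the before/after/result fixtures, subtracts per-CPU counters between two /proc/interrupts snapshots. A simplified sketch of the core subtraction; the real implementation also aligns rows that appear in only one snapshot (the '>'-prefixed mct_tick4 row in the result fixture):

def diff_counts(before_line, after_line):
    """Subtract per-CPU counts between two /proc/interrupts rows.

    Simplified: assumes both rows describe the same IRQ and that every
    purely-numeric column is a counter; non-numeric columns (the IRQ
    label and the controller/device names) are carried through.
    """
    out = []
    for b, a in zip(before_line.split(), after_line.split()):
        if b.isdigit() and a.isdigit():
            out.append(str(int(a) - int(b)))
        else:
            out.append(a)
    return ' '.join(out)

assert diff_counts('152: 26235 0 GIC mct_tick0',
                   '152: 26439 0 GIC mct_tick0') == '152: 204 0 GIC mct_tick0'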
# # pylint: disable=W0231,W0613,E0611,W0603,R0201 from unittest import TestCase from nose.tools import assert_equal, assert_raises from wa.utils.exec_control import (init_environment, reset_environment, activate_environment, once, once_per_class, once_per_instance, once_per_attribute_value) class MockClass(object): called = 0 def __init__(self): self.count = 0 @once def called_once(self): MockClass.called += 1 @once def initilize_once(self): self.count += 1 @once_per_class def initilize_once_per_class(self): self.count += 1 @once_per_instance def initilize_once_per_instance(self): self.count += 1 def __repr__(self): return '{}: Called={}'.format(self.__class__.__name__, self.called) class SubClass(MockClass): def __init__(self): super(SubClass, self).__init__() @once def initilize_once(self): super(SubClass, self).initilize_once() self.count += 1 @once_per_class def initilize_once_per_class(self): super(SubClass, self).initilize_once_per_class() self.count += 1 @once_per_instance def initilize_once_per_instance(self): super(SubClass, self).initilize_once_per_instance() self.count += 1 class SubSubClass(SubClass): def __init__(self): super(SubSubClass, self).__init__() @once def initilize_once(self): super(SubSubClass, self).initilize_once() self.count += 1 @once_per_class def initilize_once_per_class(self): super(SubSubClass, self).initilize_once_per_class() self.count += 1 @once_per_instance def initilize_once_per_instance(self): super(SubSubClass, self).initilize_once_per_instance() self.count += 1 class AnotherClass(object): def __init__(self): self.count = 0 @once def initilize_once(self): self.count += 1 @once_per_class def initilize_once_per_class(self): self.count += 1 @once_per_instance def initilize_once_per_instance(self): self.count += 1 class NamedClass: count = 0 def __init__(self, name): self.name = name @once_per_attribute_value('name') def initilize(self): NamedClass.count += 1 class AnotherSubClass(MockClass): def __init__(self): super(AnotherSubClass, self).__init__() @once def initilize_once(self): super(AnotherSubClass, self).initilize_once() self.count += 1 @once_per_class def initilize_once_per_class(self): super(AnotherSubClass, self).initilize_once_per_class() self.count += 1 @once_per_instance def initilize_once_per_instance(self): super(AnotherSubClass, self).initilize_once_per_instance() self.count += 1 class EnvironmentManagementTest(TestCase): def test_duplicate_environment(self): init_environment('ENVIRONMENT') assert_raises(ValueError, init_environment, 'ENVIRONMENT') def test_reset_missing_environment(self): assert_raises(ValueError, reset_environment, 'MISSING') def test_reset_current_environment(self): activate_environment('CURRENT_ENVIRONMENT') t1 = MockClass() t1.initilize_once() assert_equal(t1.count, 1) reset_environment() t1.initilize_once() assert_equal(t1.count, 2) def test_switch_environment(self): activate_environment('ENVIRONMENT1') t1 = MockClass() t1.initilize_once() assert_equal(t1.count, 1) activate_environment('ENVIRONMENT2') t1.initilize_once() assert_equal(t1.count, 2) activate_environment('ENVIRONMENT1') t1.initilize_once() assert_equal(t1.count, 2) def test_reset_environment_name(self): activate_environment('ENVIRONMENT') t1 = MockClass() t1.initilize_once() assert_equal(t1.count, 1) reset_environment('ENVIRONMENT') t1.initilize_once() assert_equal(t1.count, 2) class ParentOnlyOnceEvironmentTest(TestCase): def test_sub_classes(self): sc = SubClass() asc = AnotherSubClass() sc.called_once() assert_equal(sc.called, 1) asc.called_once() 
assert_equal(asc.called, 1) class OnlyOnceEnvironmentTest(TestCase): def setUp(self): activate_environment('TEST_ENVIRONMENT') def tearDown(self): reset_environment('TEST_ENVIRONMENT') def test_single_instance(self): t1 = MockClass() ac = AnotherClass() t1.initilize_once() assert_equal(t1.count, 1) t1.initilize_once() assert_equal(t1.count, 1) ac.initilize_once() assert_equal(ac.count, 1) def test_mulitple_instances(self): t1 = MockClass() t2 = MockClass() t1.initilize_once() assert_equal(t1.count, 1) t2.initilize_once() assert_equal(t2.count, 0) def test_sub_classes(self): t1 = MockClass() sc = SubClass() ss = SubSubClass() asc = AnotherSubClass() t1.initilize_once() assert_equal(t1.count, 1) sc.initilize_once() sc.initilize_once() assert_equal(sc.count, 1) ss.initilize_once() ss.initilize_once() assert_equal(ss.count, 1) asc.initilize_once() asc.initilize_once() assert_equal(asc.count, 1) class OncePerClassEnvironmentTest(TestCase): def setUp(self): activate_environment('TEST_ENVIRONMENT') def tearDown(self): reset_environment('TEST_ENVIRONMENT') def test_single_instance(self): t1 = MockClass() ac = AnotherClass() t1.initilize_once_per_class() assert_equal(t1.count, 1) t1.initilize_once_per_class() assert_equal(t1.count, 1) ac.initilize_once_per_class() assert_equal(ac.count, 1) def test_mulitple_instances(self): t1 = MockClass() t2 = MockClass() t1.initilize_once_per_class() assert_equal(t1.count, 1) t2.initilize_once_per_class() assert_equal(t2.count, 0) def test_sub_classes(self): t1 = MockClass() sc1 = SubClass() sc2 = SubClass() ss1 = SubSubClass() ss2 = SubSubClass() asc = AnotherSubClass() t1.initilize_once_per_class() assert_equal(t1.count, 1) sc1.initilize_once_per_class() sc2.initilize_once_per_class() assert_equal(sc1.count, 1) assert_equal(sc2.count, 0) ss1.initilize_once_per_class() ss2.initilize_once_per_class() assert_equal(ss1.count, 1) assert_equal(ss2.count, 0) asc.initilize_once_per_class() assert_equal(asc.count, 1) class OncePerInstanceEnvironmentTest(TestCase): def setUp(self): activate_environment('TEST_ENVIRONMENT') def tearDown(self): reset_environment('TEST_ENVIRONMENT') def test_single_instance(self): t1 = MockClass() ac = AnotherClass() t1.initilize_once_per_instance() assert_equal(t1.count, 1) t1.initilize_once_per_instance() assert_equal(t1.count, 1) ac.initilize_once_per_instance() assert_equal(ac.count, 1) def test_mulitple_instances(self): t1 = MockClass() t2 = MockClass() t1.initilize_once_per_instance() assert_equal(t1.count, 1) t2.initilize_once_per_instance() assert_equal(t2.count, 1) def test_sub_classes(self): t1 = MockClass() sc = SubClass() ss = SubSubClass() asc = AnotherSubClass() t1.initilize_once_per_instance() assert_equal(t1.count, 1) sc.initilize_once_per_instance() sc.initilize_once_per_instance() assert_equal(sc.count, 2) ss.initilize_once_per_instance() ss.initilize_once_per_instance() assert_equal(ss.count, 3) asc.initilize_once_per_instance() asc.initilize_once_per_instance() assert_equal(asc.count, 2) class OncePerAttributeValueTest(TestCase): def setUp(self): activate_environment('TEST_ENVIRONMENT') def tearDown(self): reset_environment('TEST_ENVIRONMENT') def test_once_attribute_value(self): classes = [ NamedClass('Rick'), NamedClass('Morty'), NamedClass('Rick'), NamedClass('Morty'), NamedClass('Morty'), NamedClass('Summer'), ] for c in classes: c.initilize() for c in classes: c.initilize() assert_equal(NamedClass.count, 3) ================================================ FILE: tests/test_execution.py 
================================================ # Copyright 2020 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import tempfile from unittest import TestCase from mock.mock import Mock from nose.tools import assert_equal from datetime import datetime from wa.framework.configuration import RunConfiguration from wa.framework.configuration.core import JobSpec, Status from wa.framework.execution import ExecutionContext, Runner from wa.framework.job import Job from wa.framework.output import RunOutput, init_run_output from wa.framework.output_processor import ProcessorManager import wa.framework.signal as signal from wa.framework.run import JobState from wa.framework.exception import ExecutionError class MockConfigManager(Mock): @property def jobs(self): return self._joblist @property def loaded_config_sources(self): return [] @property def plugin_cache(self): return MockPluginCache() def __init__(self, *args, **kwargs): super(MockConfigManager, self).__init__(*args, **kwargs) self._joblist = None self.run_config = RunConfiguration() def to_pod(self): return {} class MockPluginCache(Mock): def list_plugins(self, kind=None): return [] class MockProcessorManager(Mock): def __init__(self, *args, **kwargs): super(MockProcessorManager, self).__init__(*args, **kwargs) def get_enabled(self): return [] class JobState_force_retry(JobState): @property def status(self): return self._status @status.setter def status(self, value): if(self.retries != self.times_to_retry) and (value == Status.RUNNING): self._status = Status.FAILED if self.output: self.output.status = Status.FAILED else: self._status = value if self.output: self.output.status = value def __init__(self, to_retry, *args, **kwargs): self.retries = 0 self._status = Status.NEW self.times_to_retry = to_retry self.output = None super(JobState_force_retry, self).__init__(*args, **kwargs) class Job_force_retry(Job): '''This class imitates a job that retries as many times as specified by ``retries`` in its constructor''' def __init__(self, to_retry, *args, **kwargs): super(Job_force_retry, self).__init__(*args, **kwargs) self.state = JobState_force_retry(to_retry, self.id, self.label, self.iteration, Status.NEW) self.initialized = False self.finalized = False def initialize(self, context): self.initialized = True return super().initialize(context) def finalize(self, context): self.finalized = True return super().finalize(context) class TestRunState(TestCase): def setUp(self): self.path = tempfile.mkstemp()[1] os.remove(self.path) self.initialise_signals() self.context = get_context(self.path) self.job_spec = get_jobspec() def tearDown(self): signal.disconnect(self._verify_serialized_state, signal.RUN_INITIALIZED) signal.disconnect(self._verify_serialized_state, signal.JOB_STARTED) signal.disconnect(self._verify_serialized_state, signal.JOB_RESTARTED) signal.disconnect(self._verify_serialized_state, signal.JOB_COMPLETED) signal.disconnect(self._verify_serialized_state, signal.JOB_FAILED) signal.disconnect(self._verify_serialized_state, 
signal.JOB_ABORTED) signal.disconnect(self._verify_serialized_state, signal.RUN_FINALIZED) def test_job_state_transitions_pass(self): '''Tests state equality when the job passes first try''' job = Job(self.job_spec, 1, self.context) job.workload = Mock() self.context.cm._joblist = [job] self.context.run_state.add_job(job) runner = Runner(self.context, MockProcessorManager()) runner.run() def test_job_state_transitions_fail(self): '''Tests state equality when job fails completely''' job = Job_force_retry(3, self.job_spec, 1, self.context) job.workload = Mock() self.context.cm._joblist = [job] self.context.run_state.add_job(job) runner = Runner(self.context, MockProcessorManager()) runner.run() def test_job_state_transitions_retry(self): '''Tests state equality when job fails initially''' job = Job_force_retry(1, self.job_spec, 1, self.context) job.workload = Mock() self.context.cm._joblist = [job] self.context.run_state.add_job(job) runner = Runner(self.context, MockProcessorManager()) runner.run() def initialise_signals(self): signal.connect(self._verify_serialized_state, signal.RUN_INITIALIZED) signal.connect(self._verify_serialized_state, signal.JOB_STARTED) signal.connect(self._verify_serialized_state, signal.JOB_RESTARTED) signal.connect(self._verify_serialized_state, signal.JOB_COMPLETED) signal.connect(self._verify_serialized_state, signal.JOB_FAILED) signal.connect(self._verify_serialized_state, signal.JOB_ABORTED) signal.connect(self._verify_serialized_state, signal.RUN_FINALIZED) def _verify_serialized_state(self, _): fs_state = RunOutput(self.path).state ex_state = self.context.run_output.state assert_equal(fs_state.status, ex_state.status) fs_js_zip = zip( [value for key, value in fs_state.jobs.items()], [value for key, value in ex_state.jobs.items()] ) for fs_jobstate, ex_jobstate in fs_js_zip: assert_equal(fs_jobstate.iteration, ex_jobstate.iteration) assert_equal(fs_jobstate.retries, ex_jobstate.retries) assert_equal(fs_jobstate.status, ex_jobstate.status) class TestJobState(TestCase): def test_job_retry_status(self): job_spec = get_jobspec() context = get_context() job = Job_force_retry(2, job_spec, 1, context) job.workload = Mock() context.cm._joblist = [job] context.run_state.add_job(job) verifier = lambda _: assert_equal(job.status, Status.PENDING) signal.connect(verifier, signal.JOB_RESTARTED) runner = Runner(context, MockProcessorManager()) runner.run() signal.disconnect(verifier, signal.JOB_RESTARTED) def test_skipped_job_state(self): # Test, if the first job fails and the bail parameter set, # that the remaining jobs have status: SKIPPED job_spec = get_jobspec() context = get_context() context.cm.run_config.bail_on_job_failure = True job1 = Job_force_retry(3, job_spec, 1, context) job2 = Job(job_spec, 1, context) job1.workload = Mock() job2.workload = Mock() context.cm._joblist = [job1, job2] context.run_state.add_job(job1) context.run_state.add_job(job2) runner = Runner(context, MockProcessorManager()) try: runner.run() except ExecutionError: assert_equal(job2.status, Status.SKIPPED) else: assert False, "ExecutionError not raised" def test_normal_job_finalized(self): # Test that a job is initialized then finalized normally job_spec = get_jobspec() context = get_context() job = Job_force_retry(0, job_spec, 1, context) job.workload = Mock() context.cm._joblist = [job] context.run_state.add_job(job) runner = Runner(context, MockProcessorManager()) runner.run() assert_equal(job.initialized, True) assert_equal(job.finalized, True) def test_skipped_job_finalized(self): # 
Test that a skipped job has been finalized job_spec = get_jobspec() context = get_context() context.cm.run_config.bail_on_job_failure = True job1 = Job_force_retry(3, job_spec, 1, context) job2 = Job_force_retry(0, job_spec, 1, context) job1.workload = Mock() job2.workload = Mock() context.cm._joblist = [job1, job2] context.run_state.add_job(job1) context.run_state.add_job(job2) runner = Runner(context, MockProcessorManager()) try: runner.run() except ExecutionError: assert_equal(job2.finalized, True) else: assert False, "ExecutionError not raised" def test_failed_job_finalized(self): # Test that a failed job, while the bail parameter is set, # is finalized job_spec = get_jobspec() context = get_context() context.cm.run_config.bail_on_job_failure = True job1 = Job_force_retry(3, job_spec, 1, context) job1.workload = Mock() context.cm._joblist = [job1] context.run_state.add_job(job1) runner = Runner(context, MockProcessorManager()) try: runner.run() except ExecutionError: assert_equal(job1.finalized, True) else: assert False, "ExecutionError not raised" def get_context(path=None): if not path: path = tempfile.mkstemp()[1] os.remove(path) config = MockConfigManager() output = init_run_output(path, config) return ExecutionContext(config, Mock(), output) def get_jobspec(): job_spec = JobSpec() job_spec.augmentations = {} job_spec.finalize() return job_spec ================================================ FILE: tests/test_plugin.py ================================================ # Copyright 2014-2017 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
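The TestJobState cases above pin down the retry semantics: a restarted job goes back to PENDING, a job that exhausts its retries ends up FAILED but is still finalized, and with bail_on_job_failure every remaining job is marked SKIPPED (and finalized) before the run aborts. A schematic of that control flow; this is not the actual Runner implementation, and the run_job/max_retries plumbing is invented for illustration:

def run_jobs(jobs, run_job, max_retries=2, bail_on_job_failure=False):
    for i, job in enumerate(jobs):
        for _ in range(max_retries + 1):
            try:
                run_job(job)
                job.status = 'OK'
                break
            except Exception:
                job.status = 'PENDING'   # a restarted job is pending again
        else:
            job.status = 'FAILED'        # retries exhausted
        job.finalize()                   # failed jobs are still finalized
        if job.status == 'FAILED' and bail_on_job_failure:
            for remaining in jobs[i + 1:]:
                remaining.status = 'SKIPPED'
                remaining.finalize()     # ...and so are skipped ones
            # stands in for wa.framework.exception.ExecutionError
            raise RuntimeError('job failed; bailing on remaining jobs')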
# # pylint: disable=E0611,R0201,E1101 import os from unittest import TestCase from nose.tools import assert_equal, raises, assert_true from wa.framework.plugin import Plugin, PluginMeta, PluginLoader, Parameter from wa.utils.types import list_of_ints from wa import ConfigError EXTDIR = os.path.join(os.path.dirname(__file__), 'data', 'extensions') class PluginLoaderTest(TestCase): def setUp(self): self.loader = PluginLoader(paths=[EXTDIR, ]) def test_load_device(self): device = self.loader.get_device('test-device') assert_equal(device.name, 'test-device') def test_list_by_kind(self): exts = self.loader.list_devices() assert_equal(len(exts), 1) assert_equal(exts[0].name, 'test-device') class MyBasePlugin(Plugin): name = 'base' kind = 'test' parameters = [ Parameter('base'), ] def __init__(self, **kwargs): super(MyBasePlugin, self).__init__(**kwargs) self.v1 = 0 self.v2 = 0 self.v3 = '' def virtual1(self): self.v1 += 1 self.v3 = 'base' def virtual2(self): self.v2 += 1 class MyAcidPlugin(MyBasePlugin): name = 'acid' parameters = [ Parameter('hydrochloric', kind=list_of_ints, default=[1, 2]), Parameter('citric'), Parameter('carbonic', kind=int), ] def __init__(self, **kwargs): super(MyAcidPlugin, self).__init__(**kwargs) self.vv1 = 0 self.vv2 = 0 def virtual1(self): self.vv1 += 1 self.v3 = 'acid' def virtual2(self): self.vv2 += 1 class MyOtherPlugin(MyBasePlugin): name = 'other' parameters = [ Parameter('mandatory', mandatory=True), Parameter('optional', allowed_values=['test', 'check']), ] class MyOtherOtherPlugin(MyOtherPlugin): name = 'otherother' parameters = [ Parameter('mandatory', override=True), ] class MyOverridingPlugin(MyAcidPlugin): name = 'overriding' parameters = [ Parameter('hydrochloric', override=True, default=[3, 4]), ] class MyThirdTeerPlugin(MyOverridingPlugin): name = 'thirdteer' class MultiValueParamExt(Plugin): name = 'multivalue' kind = 'test' parameters = [ Parameter('test', kind=list_of_ints, allowed_values=[42, 7, 73]), ] class PluginMetaTest(TestCase): def test_propagation(self): acid_params = [p.name for p in MyAcidPlugin.parameters] assert_equal(acid_params, ['base', 'hydrochloric', 'citric', 'carbonic']) @raises(ValueError) def test_duplicate_param_spec(self): class BadPlugin(MyBasePlugin): # pylint: disable=W0612 parameters = [ Parameter('base'), ] def test_param_override(self): class OverridingPlugin(MyBasePlugin): # pylint: disable=W0612 parameters = [ Parameter('base', override=True, default='cheese'), ] assert_equal(OverridingPlugin.parameters['base'].default, 'cheese') @raises(ValueError) def test_invalid_param_spec(self): class BadPlugin(MyBasePlugin): # pylint: disable=W0612 parameters = [ 7, ] class ParametersTest(TestCase): def test_setting(self): myext = MyAcidPlugin(hydrochloric=[5, 6], citric=5, carbonic=42) assert_equal(myext.hydrochloric, [5, 6]) assert_equal(myext.citric, '5') assert_equal(myext.carbonic, 42) def test_validation_ok(self): myext = MyOtherPlugin(mandatory='check', optional='check') myext.validate() def test_default_override(self): myext = MyOverridingPlugin() assert_equal(myext.hydrochloric, [3, 4]) myotherext = MyThirdTeerPlugin() assert_equal(myotherext.hydrochloric, [3, 4]) def test_multivalue_param(self): myext = MultiValueParamExt(test=[7, 42]) myext.validate() assert_equal(myext.test, [7, 42]) @raises(ConfigError) def test_bad_multivalue_param(self): myext = MultiValueParamExt(test=[5]) myext.validate() @raises(ConfigError) def test_validation_no_mandatory(self): myext = MyOtherPlugin(optional='check') myext.validate() 
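The ParametersTest assertions just above are effectively usage documentation for the Parameter machinery: assigned values are coerced to the declared kind, and `override=True` lets a subclass replace an inherited parameter's default. Restated against the fixture classes defined earlier in this file:

# Reuses the MyAcidPlugin/MyOverridingPlugin fixtures defined above.
acid = MyAcidPlugin(hydrochloric=[5, 6], citric=5, carbonic=42)
assert acid.hydrochloric == [5, 6]   # kind=list_of_ints
assert acid.citric == '5'            # unspecified kind defaults to string
assert acid.carbonic == 42           # kind=int
assert MyOverridingPlugin().hydrochloric == [3, 4]   # override=True default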
@raises(ConfigError) def test_validation_no_mandatory_in_derived(self): MyOtherOtherPlugin() @raises(ConfigError) def test_validation_bad_value(self): myext = MyOtherPlugin(mandatory=1, optional='invalid') myext.validate() ================================================ FILE: tests/test_runtime_param_utils.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest from nose.tools import assert_equal from mock.mock import Mock from wa.utils.misc import resolve_cpus, resolve_unique_domain_cpus class TestRuntimeParameterUtils(unittest.TestCase): def test_resolve_cpu(self): # Set up a mock target mock = Mock() mock.big_core = "A72" mock.little_core = "A53" mock.core_names = ['A72', 'A72', 'A53', 'A53'] mock.number_of_cpus = 4 def mock_core_cpus(core): return [i for i, c in enumerate(mock.core_names) if c == core] def mock_online_cpus(): return [0, 1, 2] def mock_offline_cpus(): return [3] def mock_related_cpus(core): if core in [0, 1]: return [0, 1] elif core in [2, 3]: return [2, 3] mock.list_online_cpus = mock_online_cpus mock.list_offline_cpus = mock_offline_cpus mock.core_cpus = mock_core_cpus mock.core_cpus = mock_core_cpus mock.cpufreq.get_related_cpus = mock_related_cpus # Check retrieving cpus from a given prefix assert_equal(resolve_cpus('A72', mock), [0, 1]) assert_equal(resolve_cpus('A53', mock), [2, 3]) assert_equal(resolve_cpus('big', mock), [0, 1]) assert_equal(resolve_cpus('little', mock), [2, 3]) assert_equal(resolve_cpus('', mock), [0, 1, 2, 3]) assert_equal(resolve_cpus('cpu0', mock), [0]) assert_equal(resolve_cpus('cpu3', mock), [3]) # Check get unique domain cpus assert_equal(resolve_unique_domain_cpus('A72', mock), [0]) assert_equal(resolve_unique_domain_cpus('A53', mock), [2]) assert_equal(resolve_unique_domain_cpus('big', mock), [0]) assert_equal(resolve_unique_domain_cpus('little', mock), [2]) assert_equal(resolve_unique_domain_cpus('', mock), [0, 2]) assert_equal(resolve_unique_domain_cpus('cpu0', mock), [0]) assert_equal(resolve_unique_domain_cpus('cpu3', mock), [2]) ================================================ FILE: tests/test_signal.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
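The expectations in test_resolve_cpu above encode the semantics of the two helpers: resolve_cpus expands a name ('big'/'little', a core name, 'cpuN', or '' for all) into CPU ids, and resolve_unique_domain_cpus then keeps one representative per cpufreq frequency domain, always the first CPU of the domain (hence 'cpu3' resolving to [2]). A simplified restatement of the de-duplication step, assuming a target compatible with the mock above:

from wa.utils.misc import resolve_cpus  # same helper the test imports

def unique_domain_cpus(name, target):
    """Keep the first CPU of each cpufreq domain among resolved CPUs."""
    seen, unique = set(), []
    for cpu in resolve_cpus(name, target):
        domain = tuple(target.cpufreq.get_related_cpus(cpu))
        if domain not in seen:
            seen.add(domain)
            unique.append(domain[0])  # domain's first CPU, not the queried one
    return unique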
# import logging import unittest from nose.tools import assert_equal, assert_true, assert_false import wa.framework.signal as signal class Callable(object): def __init__(self, val): self.val = val def __call__(self): return self.val class TestSignalDisconnect(unittest.TestCase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.callback_ctr = 0 def setUp(self): signal.connect(self._call_me_once, 'first') signal.connect(self._call_me_once, 'second') def test_handler_disconnected(self): signal.send('first') signal.send('second') def _call_me_once(self): assert_equal(self.callback_ctr, 0) self.callback_ctr += 1 signal.disconnect(self._call_me_once, 'first') signal.disconnect(self._call_me_once, 'second') class TestPriorityDispatcher(unittest.TestCase): def setUp(self): # Stop logger output interfering with nose output in the console. logger = logging.getLogger('signal') logger.setLevel(logging.CRITICAL) def test_ConnectNotify(self): one = Callable(1) two = Callable(2) three = Callable(3) signal.connect( two, 'test', priority=200 ) signal.connect( one, 'test', priority=100 ) signal.connect( three, 'test', priority=300 ) result = [i[1] for i in signal.send('test')] assert_equal(result, [3, 2, 1]) def test_wrap_propagate(self): d = {'before': False, 'after': False, 'success': False} def before(): d['before'] = True def after(): d['after'] = True def success(): d['success'] = True signal.connect(before, signal.BEFORE_WORKLOAD_SETUP) signal.connect(after, signal.AFTER_WORKLOAD_SETUP) signal.connect(success, signal.SUCCESSFUL_WORKLOAD_SETUP) caught = False try: with signal.wrap('WORKLOAD_SETUP'): raise RuntimeError() except RuntimeError: caught = True assert_true(d['before']) assert_true(d['after']) assert_true(caught) assert_false(d['success']) ================================================ FILE: tests/test_utils.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
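test_ConnectNotify and test_wrap_propagate above demonstrate the two sides of the signal API: handlers fire in descending priority order, and signal.wrap() brackets a block with BEFORE_/SUCCESSFUL_/AFTER_ events, skipping the SUCCESSFUL one when an exception escapes. A usage sketch mirroring those tests (the 'my-event' name is arbitrary; module-level handler functions are used so the dispatcher's weak references keep them alive):

import wa.framework.signal as signal

def low_priority_handler():
    return 'low'

def high_priority_handler():
    return 'high'

signal.connect(low_priority_handler, 'my-event', priority=10)
signal.connect(high_priority_handler, 'my-event', priority=100)

# send() returns (receiver, result) pairs, highest priority first.
results = [result for _, result in signal.send('my-event')]
assert results == ['high', 'low']

with signal.wrap('WORKLOAD_SETUP'):
    pass  # SUCCESSFUL_WORKLOAD_SETUP fires only if no exception was raised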
# # pylint: disable=R0201 from unittest import TestCase from nose.tools import raises, assert_equal, assert_not_equal, assert_in, assert_not_in from nose.tools import assert_true, assert_false, assert_raises, assert_is, assert_list_equal from wa.utils.types import (list_or_integer, list_or_bool, caseless_string, arguments, prioritylist, enum, level, toggle_set) class TestPriorityList(TestCase): def test_insert(self): pl = prioritylist() elements = {3: "element 3", 2: "element 2", 1: "element 1", 5: "element 5", 4: "element 4" } for key in elements: pl.add(elements[key], priority=key) match = list(zip(sorted(elements.values()), pl[:])) for pair in match: assert(pair[0] == pair[1]) def test_delete(self): pl = prioritylist() elements = {2: "element 3", 1: "element 2", 0: "element 1", 4: "element 5", 3: "element 4" } for key in elements: pl.add(elements[key], priority=key) del elements[2] del pl[2] match = list(zip(sorted(elements.values()), pl[:])) for pair in match: assert(pair[0] == pair[1]) def test_multiple(self): pl = prioritylist() pl.add('1', 1) pl.add('2.1', 2) pl.add('3', 3) pl.add('2.2', 2) it = iter(pl) assert_equal(next(it), '3') assert_equal(next(it), '2.1') assert_equal(next(it), '2.2') assert_equal(next(it), '1') def test_iterator_break(self): pl = prioritylist() pl.add('1', 1) pl.add('2.1', 2) pl.add('3', 3) pl.add('2.2', 2) for i in pl: if i == '2.1': break assert_equal(pl.index('3'), 3) def test_add_before_after(self): pl = prioritylist() pl.add('m', 1) pl.add('a', 2) pl.add('n', 1) pl.add('b', 2) pl.add_before('x', 'm') assert_equal(list(pl), ['a', 'b', 'x', 'm', 'n']) pl.add_after('y', 'b') assert_equal(list(pl), ['a', 'b','y', 'x', 'm', 'n']) pl.add_after('z', 'm') assert_equal(list(pl), ['a', 'b', 'y', 'x', 'm', 'z', 'n']) class TestEnumLevel(TestCase): def test_enum_creation(self): e = enum(['one', 'two', 'three']) assert_list_equal(e.values, [0, 1, 2]) e = enum(['one', 'two', 'three'], start=10) assert_list_equal(e.values, [10, 11, 12]) e = enum(['one', 'two', 'three'], start=-10, step=10) assert_list_equal(e.values, [-10, 0, 10]) def test_enum_name_conflicts(self): assert_raises(ValueError, enum, ['names', 'one', 'two']) e = enum(['NAMES', 'one', 'two']) assert_in('names', e.levels) assert_list_equal(e.names, ['names', 'one', 'two']) assert_equal(e.ONE, 'one') result = not (e.ONE != 'one') assert_true(result) def test_enum_behavior(self): e = enum(['one', 'two', 'three']) # case-insensitive level name and level value may all # be used for equality comparisons. assert_equal(e.one, 'one') assert_equal(e.one, 'ONE') assert_equal(e.one, 0) assert_not_equal(e.one, '0') # ditto for enum membership tests assert_in('one', e.levels) assert_in(2, e.levels) assert_not_in('five', e.levels) # The same level object returned, only when # passing in a valid level name/value. 
assert_is(e('one'), e('ONE')) assert_is(e('one'), e(0)) assert_raises(ValueError, e, 'five') def test_serialize_level(self): l = level('test', 1) s = l.to_pod() l2 = level.from_pod(s) assert_equal(l, l2) def test_deserialize_enum(self): e = enum(['one', 'two', 'three']) s = e.one.to_pod() l = e.from_pod(s) assert_equal(l, e.one) class TestToggleSet(TestCase): def test_equality(self): ts1 = toggle_set(['one', 'two',]) ts2 = toggle_set(['one', 'two', '~three']) assert_not_equal(ts1, ts2) assert_equal(ts1.values(), ts2.values()) assert_equal(ts2, toggle_set(['two', '~three', 'one'])) def test_merge(self): ts1 = toggle_set(['one', 'two', 'three', '~four', '~five']) ts2 = toggle_set(['two', '~three', 'four', '~five']) ts3 = ts1.merge_with(ts2) assert_equal(ts1, toggle_set(['one', 'two', 'three', '~four', '~five'])) assert_equal(ts2, toggle_set(['two', '~three', 'four', '~five'])) assert_equal(ts3, toggle_set(['one', 'two', '~three', 'four', '~five'])) assert_equal(ts3.values(), set(['one', 'two','four'])) ts4 = ts1.merge_into(ts2) assert_equal(ts1, toggle_set(['one', 'two', 'three', '~four', '~five'])) assert_equal(ts2, toggle_set(['two', '~three', 'four', '~five'])) assert_equal(ts4, toggle_set(['one', 'two', 'three', '~four', '~five'])) assert_equal(ts4.values(), set(['one', 'two', 'three'])) def test_drop_all_previous(self): ts1 = toggle_set(['one', 'two', 'three']) ts2 = toggle_set(['four', '~~', 'five']) ts3 = toggle_set(['six', 'seven', '~three']) ts4 = ts1.merge_with(ts2).merge_with(ts3) assert_equal(ts4, toggle_set(['four', 'five', 'six', 'seven', '~three', '~~'])) ts5 = ts2.merge_into(ts3).merge_into(ts1) assert_equal(ts5, toggle_set(['four', 'five', '~~'])) ts6 = ts2.merge_into(ts3).merge_with(ts1) assert_equal(ts6, toggle_set(['one', 'two', 'three', 'four', 'five', '~~'])) def test_order_on_create(self): ts1 = toggle_set(['one', 'two', 'three', '~one']) assert_equal(ts1, toggle_set(['~one', 'two', 'three'])) ts1 = toggle_set(['~one', 'two', 'three', 'one']) assert_equal(ts1, toggle_set(['one', 'two', 'three'])) ================================================ FILE: wa/__init__.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
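# The re-exports below form WA's public plugin API. A minimal sketch of a
# workload plugin written against it (hypothetical plugin name and parameter;
# the method shown is assumed to be the minimal override):
#
#     from wa import Workload, Parameter
#
#     class ExampleWorkload(Workload):
#         name = 'example'
#         parameters = [
#             Parameter('duration', kind=int, default=10,
#                       description='Illustrative parameter only.'),
#         ]
#
#         def run(self, context):
#             pass  # drive the target here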
# from wa.framework import pluginloader, signal from wa.framework.command import Command, ComplexCommand, SubCommand from wa.framework.configuration import settings from wa.framework.configuration.core import Status from wa.framework.exception import (CommandError, ConfigError, HostError, InstrumentError, # pylint: disable=redefined-builtin JobError, NotFoundError, OutputProcessorError, PluginLoaderError, ResourceError, TargetError, TargetNotRespondingError, TimeoutError, ToolError, ValidationError, WAError, WorkloadError, WorkerThreadError) from wa.framework.instrument import (Instrument, extremely_slow, very_slow, slow, normal, fast, very_fast, extremely_fast, hostside) from wa.framework.output import RunOutput, discover_wa_outputs from wa.framework.output_processor import OutputProcessor from wa.framework.plugin import Plugin, Parameter, Alias from wa.framework.resource import (NO_ONE, JarFile, ApkFile, ReventFile, File, Executable) from wa.framework.target.descriptor import (TargetDescriptor, TargetDescription, create_target_description, add_description_for_target) from wa.framework.workload import (Workload, ApkWorkload, ApkUiautoWorkload, ApkReventWorkload, UIWorkload, UiautoWorkload, PackageHandler, ReventWorkload, TestPackageHandler) from wa.framework.version import get_wa_version, get_wa_version_with_commit __version__ = get_wa_version() __full_version__ = get_wa_version_with_commit() ================================================ FILE: wa/commands/__init__.py ================================================ ================================================ FILE: wa/commands/create.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
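# Illustrative invocations of the subcommands defined in this module
# ('dhrystone' and the file names are assumed example values):
#
#     wa create workload my_workload            # new workload from the basic template
#     wa create workload -k apkuiauto my_app    # pick a different template with -k
#     wa create agenda dhrystone -o agenda.yaml
#     wa create database -u postgres -d wa
#     wa create package my_wa_plugins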
# import os import sys import stat import shutil import string import re import uuid import getpass from collections import OrderedDict from devlib.utils.types import identifier try: import psycopg2 from psycopg2 import connect, OperationalError, extras from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT except ImportError as e: psycopg2 = None import_error_msg = e.args[0] if e.args else str(e) from wa import ComplexCommand, SubCommand, pluginloader, settings from wa.framework.target.descriptor import list_target_descriptions from wa.framework.exception import ConfigError, CommandError from wa.instruments.energy_measurement import EnergyInstrumentBackend from wa.utils.misc import (ensure_directory_exists as _d, capitalize, ensure_file_directory_exists as _f) from wa.utils.postgres import get_schema, POSTGRES_SCHEMA_DIR from wa.utils.serializer import yaml if sys.version_info >= (3, 8): def copy_tree(src, dst): from shutil import copy, copytree # pylint: disable=import-outside-toplevel copytree( src, dst, # dirs_exist_ok=True only exists in Python >= 3.8 dirs_exist_ok=True, # Align with devlib and only copy the content without metadata copy_function=copy ) else: def copy_tree(src, dst): # pylint: disable=import-outside-toplevel, redefined-outer-name from distutils.dir_util import copy_tree # Align with devlib and only copy the content without metadata copy_tree(src, dst, preserve_mode=False, preserve_times=False) TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates') class CreateDatabaseSubcommand(SubCommand): name = 'database' description = """ Create a Postgresql database which is compatible with the WA Postgres output processor. """ schemafilepath = os.path.join(POSTGRES_SCHEMA_DIR, 'postgres_schema.sql') schemaupdatefilepath = os.path.join(POSTGRES_SCHEMA_DIR, 'postgres_schema_update_v{}.{}.sql') def __init__(self, *args, **kwargs): super(CreateDatabaseSubcommand, self).__init__(*args, **kwargs) self.sql_commands = None self.schema_major = None self.schema_minor = None self.postgres_host = None self.postgres_port = None self.username = None self.password = None self.dbname = None self.config_file = None self.force = None def initialize(self, context): self.parser.add_argument( '-a', '--postgres-host', default='localhost', help='The host on which to create the database.') self.parser.add_argument( '-k', '--postgres-port', default='5432', help='The port on which the PostgreSQL server is running.') self.parser.add_argument( '-u', '--username', default='postgres', help='The username with which to connect to the server.') self.parser.add_argument( '-p', '--password', help='The password for the user account.') self.parser.add_argument( '-d', '--dbname', default='wa', help='The name of the database to create.') self.parser.add_argument( '-f', '--force', action='store_true', help='Force overwrite the existing database if one exists.') self.parser.add_argument( '-F', '--force-update-config', action='store_true', help='Force update the config file if an entry exists.') self.parser.add_argument( '-r', '--config-file', default=settings.user_config_file, help='Path to the config file to be updated.') self.parser.add_argument( '-x', '--schema-version', action='store_true', help='Display the current schema version.') self.parser.add_argument( '-U', '--upgrade', action='store_true', help='Upgrade the database to use the latest schema version.') def execute(self, state, args): # pylint: disable=too-many-branches if not psycopg2: raise CommandError( 'The module psycopg2 is required 
for the wa ' +
                'create database command.')
        if args.dbname == 'postgres':
            raise ValueError('Database name to create cannot be postgres.')

        self._parse_args(args)
        self.schema_major, self.schema_minor, self.sql_commands = get_schema(self.schemafilepath)

        # Display the version if needed and exit
        if args.schema_version:
            self.logger.info('The current schema version is {}.{}'.format(
                self.schema_major, self.schema_minor))
            return

        if args.upgrade:
            self.update_schema()
            return

        # Open user configuration
        with open(self.config_file, 'r') as config_file:
            config = yaml.load(config_file)
            if 'postgres' in config and not args.force_update_config:
                raise CommandError(
                    "The entry 'postgres' already exists in the config file. " +
                    "Please specify the -F flag to force an update.")

        possible_connection_errors = [
            (
                re.compile('FATAL: role ".*" does not exist'),
                'Username does not exist or password is incorrect'
            ),
            (
                re.compile('FATAL: password authentication failed for user'),
                'Password was incorrect'
            ),
            (
                re.compile('fe_sendauth: no password supplied'),
                'Passwordless connection is not enabled. '
                'Please enable trust in pg_hba for this host '
                'or use a password'
            ),
            (
                re.compile('FATAL: no pg_hba.conf entry for'),
                'Host is not allowed to connect to the specified database '
                'using this user according to pg_hba.conf. Please change the '
                'rules in pg_hba or your connection method'
            ),
            (
                re.compile('FATAL: pg_hba.conf rejects connection'),
                'Connection was rejected by pg_hba.conf'
            ),
        ]

        def predicate(error, handle):
            if handle[0].match(str(error)):
                raise CommandError(handle[1] + ': \n' + str(error))

        # Attempt to create database
        try:
            self.create_database()
        except OperationalError as e:
            for handle in possible_connection_errors:
                predicate(e, handle)
            raise e

        # Update the configuration file
        self._update_configuration_file(config)

    def create_database(self):
        self._validate_version()
        self._check_database_existence()
        self._create_database_postgres()
        self._apply_database_schema(self.sql_commands, self.schema_major, self.schema_minor)
        self.logger.info("Successfully created the database {}".format(self.dbname))

    def update_schema(self):
        self._validate_version()
        schema_major, schema_minor, _ = get_schema(self.schemafilepath)
        meta_oid, current_major, current_minor = self._get_database_schema_version()
        while not (schema_major == current_major and schema_minor == current_minor):
            current_minor = self._update_schema_minors(current_major, current_minor, meta_oid)
            current_major, current_minor = self._update_schema_major(current_major,
                                                                     current_minor, meta_oid)
        msg = "Database schema update of '{}' to v{}.{} complete"
        self.logger.info(msg.format(self.dbname, schema_major, schema_minor))

    def _update_schema_minors(self, major, minor, meta_oid):
        # Upgrade all available minor versions
        while True:
            minor += 1
            schema_update = os.path.join(POSTGRES_SCHEMA_DIR,
                                         self.schemaupdatefilepath.format(major, minor))
            if not os.path.exists(schema_update):
                break

            _, _, sql_commands = get_schema(schema_update)
            self._apply_database_schema(sql_commands, major, minor, meta_oid)
            msg = "Updated the database schema to v{}.{}"
            self.logger.debug(msg.format(major, minor))

        # Return last existing update file version
        return minor - 1

    def _update_schema_major(self, current_major, current_minor, meta_oid):
        current_major += 1
        schema_update = os.path.join(POSTGRES_SCHEMA_DIR,
                                     self.schemaupdatefilepath.format(current_major, 0))
        if not os.path.exists(schema_update):
            return (current_major - 1, current_minor)

        # Reset minor to 0 with major version bump
        current_minor = 0
        _, _, sql_commands = get_schema(schema_update)
        self._apply_database_schema(sql_commands, current_major, current_minor, meta_oid)
        msg = "Updated the database schema to v{}.{}"
        self.logger.debug(msg.format(current_major, current_minor))
        return (current_major, current_minor)

    def _validate_version(self):
        conn = connect(user=self.username, password=self.password,
                       host=self.postgres_host, port=self.postgres_port)
        if conn.server_version < 90400:
            msg = 'Postgres version too low. Please ensure that you are using at least v9.4'
            raise CommandError(msg)

    def _get_database_schema_version(self):
        conn = connect(dbname=self.dbname, user=self.username,
                       password=self.password, host=self.postgres_host,
                       port=self.postgres_port)
        cursor = conn.cursor()
        cursor.execute('''SELECT DatabaseMeta.oid,
                                 DatabaseMeta.schema_major,
                                 DatabaseMeta.schema_minor
                          FROM DatabaseMeta;''')
        return cursor.fetchone()

    def _check_database_existence(self):
        try:
            connect(dbname=self.dbname, user=self.username,
                    password=self.password, host=self.postgres_host,
                    port=self.postgres_port)
        except OperationalError as e:
            # Expect an operational error (database's non-existence)
            if not re.compile('FATAL: database ".*" does not exist').match(str(e)):
                raise e
        else:
            if not self.force:
                raise CommandError(
                    "Database {} already exists. ".format(self.dbname) +
                    "Please specify the -f flag to create it afresh."
                )

    def _create_database_postgres(self):
        conn = connect(dbname='postgres', user=self.username,
                       password=self.password, host=self.postgres_host,
                       port=self.postgres_port)
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cursor = conn.cursor()
        cursor.execute('DROP DATABASE IF EXISTS ' + self.dbname)
        cursor.execute('CREATE DATABASE ' + self.dbname)
        conn.commit()
        cursor.close()
        conn.close()

    def _apply_database_schema(self, sql_commands, schema_major, schema_minor, meta_uuid=None):
        conn = connect(dbname=self.dbname, user=self.username,
                       password=self.password, host=self.postgres_host,
                       port=self.postgres_port)
        cursor = conn.cursor()
        cursor.execute(sql_commands)

        if not meta_uuid:
            extras.register_uuid()
            meta_uuid = uuid.uuid4()
            cursor.execute("INSERT INTO DatabaseMeta VALUES (%s, %s, %s)",
                           (meta_uuid, schema_major, schema_minor))
        else:
            cursor.execute("UPDATE DatabaseMeta SET schema_major = %s, "
                           "schema_minor = %s WHERE oid = %s;",
                           (schema_major, schema_minor, meta_uuid))
        conn.commit()
        cursor.close()
        conn.close()

    def _update_configuration_file(self, config):
        ''' Update the user configuration file with the newly created database's
            configuration.
        '''
        config['postgres'] = OrderedDict(
            [('host', self.postgres_host), ('port', self.postgres_port),
             ('dbname', self.dbname), ('username', self.username),
             ('password', self.password)])
        with open(self.config_file, 'w+') as config_file:
            yaml.dump(config, config_file)

    def _parse_args(self, args):
        self.postgres_host = args.postgres_host
        self.postgres_port = args.postgres_port
        self.username = args.username
        self.password = args.password
        self.dbname = args.dbname
        self.config_file = args.config_file
        self.force = args.force


class CreateAgendaSubcommand(SubCommand):

    name = 'agenda'
    description = """
    Create an agenda with the specified extensions enabled, and their
    parameters set to their default values.
    """

    def initialize(self, context):
        self.parser.add_argument('plugins', nargs='+',
                                 help='Plugins to be added to the agenda')
        self.parser.add_argument('-i', '--iterations', type=int, default=1,
                                 help='Sets the number of iterations for all workloads')
        self.parser.add_argument('-o', '--output', metavar='FILE',
                                 help='Output file. If not specified, STDOUT will be used instead.')

    # pylint: disable=too-many-branches
    def execute(self, state, args):
        agenda = OrderedDict()
        agenda['config'] = OrderedDict(augmentations=[], iterations=args.iterations)
        agenda['workloads'] = []
        target_desc = None

        targets = {td.name: td for td in list_target_descriptions()}

        for name in args.plugins:
            if name in targets:
                if target_desc is not None:
                    raise ConfigError('Specifying multiple devices: {} and {}'.format(
                        target_desc.name, name))
                target_desc = targets[name]
                agenda['config']['device'] = name
                agenda['config']['device_config'] = target_desc.get_default_config()
                continue

            extcls = pluginloader.get_plugin_class(name)
            config = pluginloader.get_default_config(name)

            # Handle special case for EnergyInstrumentBackends
            if issubclass(extcls, EnergyInstrumentBackend):
                if 'energy_measurement' not in agenda['config']['augmentations']:
                    energy_config = pluginloader.get_default_config('energy_measurement')
                    agenda['config']['augmentations'].append('energy_measurement')
                    agenda['config']['energy_measurement'] = energy_config
                agenda['config']['energy_measurement']['instrument'] = extcls.name
                agenda['config']['energy_measurement']['instrument_parameters'] = config
            elif extcls.kind == 'workload':
                entry = OrderedDict()
                entry['name'] = extcls.name
                if name != extcls.name:
                    entry['label'] = name
                entry['params'] = config
                agenda['workloads'].append(entry)
            else:
                if extcls.kind in ('instrument', 'output_processor'):
                    if extcls.name not in agenda['config']['augmentations']:
                        agenda['config']['augmentations'].append(extcls.name)
                if extcls.name not in agenda['config']:
                    agenda['config'][extcls.name] = config

        if args.output:
            wfh = open(args.output, 'w')
        else:
            wfh = sys.stdout
        yaml.dump(agenda, wfh, indent=4, default_flow_style=False)
        if args.output:
            wfh.close()


class CreateWorkloadSubcommand(SubCommand):

    name = 'workload'
    description = '''Create a new workload. By default, a basic workload template will be
                     used but you can specify the `KIND` to choose a different template.'''

    def initialize(self, context):
        self.parser.add_argument('name', metavar='NAME',
                                 help='Name of the workload to be created')
        self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
                                 help='The location at which the workload will be created. If not specified, ' +
                                      'this defaults to "~/.workload_automation/plugins".')
        self.parser.add_argument('-f', '--force', action='store_true',
                                 help='Create the new workload even if a workload with the specified ' +
                                      'name already exists.')
        self.parser.add_argument('-k', '--kind', metavar='KIND', default='basic',
                                 choices=list(create_funcs.keys()),
                                 help='The type of workload to be created. The available options ' +
                                      'are: {}'.format(', '.join(list(create_funcs.keys()))))

    def execute(self, state, args):  # pylint: disable=R0201
        where = args.path or 'local'
        check_name = not args.force

        try:
            create_workload(args.name, args.kind, where, check_name)
        except CommandError as e:
            self.logger.error('ERROR: {}'.format(e))


class CreatePackageSubcommand(SubCommand):

    name = 'package'
    description = '''Create a new empty Python package for WA extensions. On installation,
                     this package will "advertise" itself to WA so that Plugins within it will
                     be loaded by WA when it runs.'''

    def initialize(self, context):
        self.parser.add_argument('name', metavar='NAME',
                                 help='Name of the package to be created')
        self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
                                 help='The location at which the new package will be created. 
If not specified, ' + 'current working directory will be used.') self.parser.add_argument('-f', '--force', action='store_true', help='Create the new package even if a file or directory with the same name ' 'already exists at the specified location.') def execute(self, state, args): # pylint: disable=R0201 package_dir = args.path or os.path.abspath('.') template_path = os.path.join(TEMPLATES_DIR, 'setup.template') self.create_extensions_package(package_dir, args.name, template_path, args.force) def create_extensions_package(self, location, name, setup_template_path, overwrite=False): package_path = os.path.join(location, name) if os.path.exists(package_path): if overwrite: self.logger.info('overwriting existing "{}"'.format(package_path)) shutil.rmtree(package_path) else: raise CommandError('Location "{}" already exists.'.format(package_path)) actual_package_path = os.path.join(package_path, name) os.makedirs(actual_package_path) setup_text = render_template(setup_template_path, {'package_name': name, 'user': getpass.getuser()}) with open(os.path.join(package_path, 'setup.py'), 'w') as wfh: wfh.write(setup_text) touch(os.path.join(actual_package_path, '__init__.py')) class CreateCommand(ComplexCommand): name = 'create' description = ''' Used to create various WA-related objects (see positional arguments list for what objects may be created).\n\nUse "wa create -h" for object-specific arguments. ''' subcmd_classes = [ CreateDatabaseSubcommand, CreateWorkloadSubcommand, CreateAgendaSubcommand, CreatePackageSubcommand, ] def create_workload(name, kind='basic', where='local', check_name=True, **kwargs): if check_name: if name in [wl.name for wl in pluginloader.list_plugins('workload')]: raise CommandError('Workload with name "{}" already exists.'.format(name)) class_name = get_class_name(name) if where == 'local': workload_dir = _d(os.path.join(settings.plugins_directory, name)) else: workload_dir = _d(os.path.join(where, name)) try: # Note: `create_funcs` mapping is listed below create_funcs[kind](workload_dir, name, kind, class_name, **kwargs) except KeyError: raise CommandError('Unknown workload type: {}'.format(kind)) # pylint: disable=superfluous-parens print('Workload created in {}'.format(workload_dir)) def create_template_workload(path, name, kind, class_name): source_file = os.path.join(path, '__init__.py') with open(source_file, 'w') as wfh: wfh.write(render_template('{}_workload'.format(kind), {'name': name, 'class_name': class_name})) def create_uiautomator_template_workload(path, name, kind, class_name): uiauto_path = os.path.join(path, 'uiauto') create_uiauto_project(uiauto_path, name) create_template_workload(path, name, kind, class_name) def create_uiauto_project(path, name): package_name = 'com.arm.wa.uiauto.' 
+ name.lower()
    copy_tree(os.path.join(TEMPLATES_DIR, 'uiauto', 'uiauto_workload_template'), path)

    manifest_path = os.path.join(path, 'app', 'src', 'main')
    manifest = os.path.join(_d(manifest_path), 'AndroidManifest.xml')
    with open(manifest, 'w') as wfh:
        wfh.write(render_template(os.path.join('uiauto', 'uiauto_AndroidManifest.xml'),
                                  {'package_name': package_name}))

    build_gradle_path = os.path.join(path, 'app')
    build_gradle = os.path.join(_d(build_gradle_path), 'build.gradle')
    with open(build_gradle, 'w') as wfh:
        wfh.write(render_template(os.path.join('uiauto', 'uiauto_build.gradle'),
                                  {'package_name': package_name}))

    build_script = os.path.join(path, 'build.sh')
    with open(build_script, 'w') as wfh:
        wfh.write(render_template(os.path.join('uiauto', 'uiauto_build_script'),
                                  {'package_name': package_name}))
    os.chmod(build_script, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)

    source_file = _f(os.path.join(path, 'app', 'src', 'main', 'java',
                                  os.sep.join(package_name.split('.')[:-1]),
                                  'UiAutomation.java'))
    with open(source_file, 'w') as wfh:
        wfh.write(render_template(os.path.join('uiauto', 'UiAutomation.java'),
                                  {'name': name, 'package_name': package_name}))


# Mapping of workload types to their corresponding creation method
create_funcs = {
    'basic': create_template_workload,
    'apk': create_template_workload,
    'revent': create_template_workload,
    'apkrevent': create_template_workload,
    'uiauto': create_uiautomator_template_workload,
    'apkuiauto': create_uiautomator_template_workload,
}


# Utility functions
def render_template(name, params):
    filepath = os.path.join(TEMPLATES_DIR, name)
    with open(filepath) as fh:
        text = fh.read()
        template = string.Template(text)
        return template.substitute(params)


def get_class_name(name, postfix=''):
    name = identifier(name)
    return ''.join(map(capitalize, name.split('_'))) + postfix


def touch(path):
    with open(path, 'w') as _:  # NOQA
        pass


================================================
FILE: wa/commands/list.py
================================================
# Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from wa import Command
from wa.framework import pluginloader
from wa.framework.target.descriptor import list_target_descriptions
from wa.utils.doc import get_summary
from wa.utils.formatter import DescriptionListFormatter


class ListCommand(Command):

    name = 'list'
    description = 'List available WA plugins with a short description of each.'

    def initialize(self, context):
        kinds = get_kinds()
        kinds.extend(['augmentations', 'all'])
        self.parser.add_argument('kind', metavar='KIND',
                                 help=('Specify the kind of plugin to list. Must be '
                                       'one of: {}'.format(', '.join(sorted(kinds)))),
                                 choices=sorted(kinds))
        self.parser.add_argument('-n', '--name',
                                 help='Filter results by the name specified')
        self.parser.add_argument('-o', '--packaged-only', action='store_true',
                                 help='''
                                 Only list plugins packaged with WA itself. Do not list
                                 plugins installed locally or from other packages. 
''') self.parser.add_argument('-p', '--platform', help=''' Only list results that are supported by the specified platform. ''') # pylint: disable=superfluous-parens def execute(self, state, args): filters = {} if args.name: filters['name'] = args.name if args.kind == 'targets': list_targets() elif args.kind == 'augmentations': print('instruments:') args.kind = 'instruments' list_plugins(args, filters) print('\noutput processors:') args.kind = 'output_processors' list_plugins(args, filters) elif args.kind == 'all': for kind in sorted(get_kinds()): print('\n{}:'.format(kind)) if kind == 'targets': list_targets() else: args.kind = kind list_plugins(args, filters) else: list_plugins(args, filters) def get_kinds(): kinds = pluginloader.kinds if 'target_descriptor' in kinds: kinds.remove('target_descriptor') kinds.append('target') return ['{}s'.format(name) for name in kinds] # pylint: disable=superfluous-parens def list_targets(): targets = list_target_descriptions() targets = sorted(targets, key=lambda x: x.name) output = DescriptionListFormatter() for target in targets: output.add_item(target.description or '', target.name) print(output.format_data()) print('') def list_plugins(args, filters): results = pluginloader.list_plugins(args.kind[:-1]) if filters or args.platform: filtered_results = [] for result in results: passed = True for k, v in filters.items(): if getattr(result, k) != v: passed = False break if passed and args.platform: passed = check_platform(result, args.platform) if passed: filtered_results.append(result) else: # no filters specified filtered_results = results if filtered_results: output = DescriptionListFormatter() for result in sorted(filtered_results, key=lambda x: x.name): output.add_item(get_summary(result), result.name) print(output.format_data()) print('') def check_platform(plugin, platform): supported_platforms = getattr(plugin, 'supported_platforms', []) if supported_platforms: return platform in supported_platforms return True ================================================ FILE: wa/commands/postgres_schemas/postgres_schema.sql ================================================ --!VERSION!1.6!ENDVERSION! 
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "lo";

-- In future, it may be useful to implement rules on which Parameter oid fields
-- can be NULL, dependent on the value in the type column;

DROP TABLE IF EXISTS DatabaseMeta;
DROP TABLE IF EXISTS Parameters;
DROP TABLE IF EXISTS Classifiers;
DROP TABLE IF EXISTS LargeObjects;
DROP TABLE IF EXISTS Artifacts;
DROP TABLE IF EXISTS Metrics;
DROP TABLE IF EXISTS Augmentations;
DROP TABLE IF EXISTS Jobs_Augs;
DROP TABLE IF EXISTS ResourceGetters;
DROP TABLE IF EXISTS Resource_Getters;
DROP TABLE IF EXISTS Events;
DROP TABLE IF EXISTS Targets;
DROP TABLE IF EXISTS Jobs;
DROP TABLE IF EXISTS Runs;

DROP TYPE IF EXISTS status_enum;
DROP TYPE IF EXISTS param_enum;

CREATE TYPE status_enum AS ENUM (
    'UNKNOWN(0)', 'NEW(1)', 'PENDING(2)', 'STARTED(3)', 'CONNECTED(4)',
    'INITIALIZED(5)', 'RUNNING(6)', 'OK(7)', 'PARTIAL(8)', 'FAILED(9)',
    'ABORTED(10)', 'SKIPPED(11)');

CREATE TYPE param_enum AS ENUM (
    'workload', 'resource_getter', 'augmentation', 'device', 'runtime', 'boot');

-- In future, it might be useful to create an ENUM type for the artifact kind,
-- or simply a generic enum type;

CREATE TABLE DatabaseMeta (
    oid uuid NOT NULL,
    schema_major int,
    schema_minor int,
    PRIMARY KEY (oid)
);

CREATE TABLE Runs (
    oid uuid NOT NULL,
    event_summary text,
    basepath text,
    status status_enum,
    timestamp timestamp,
    run_name text,
    project text,
    project_stage text,
    retry_on_status status_enum[],
    max_retries int,
    bail_on_init_failure boolean,
    allow_phone_home boolean,
    run_uuid uuid,
    start_time timestamp,
    end_time timestamp,
    duration float,
    metadata jsonb,
    _pod_version int,
    _pod_serialization_version int,
    state jsonb,
    PRIMARY KEY (oid)
);

CREATE TABLE Jobs (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    status status_enum,
    retry int,
    label text,
    job_id text,
    iterations int,
    workload_name text,
    metadata jsonb,
    _pod_version int,
    _pod_serialization_version int,
    PRIMARY KEY (oid)
);

CREATE TABLE Targets (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    target text,
    modules text[],
    cpus text[],
    os text,
    os_version jsonb,
    hostid bigint,
    hostname text,
    abi text,
    is_rooted boolean,
    kernel_version text,
    kernel_release text,
    kernel_sha1 text,
    kernel_config text[],
    sched_features text[],
    page_size_kb int,
    screen_resolution int[],
    prop json,
    android_id text,
    _pod_version int,
    _pod_serialization_version int,
    system_id text,
    PRIMARY KEY (oid)
);

CREATE TABLE Events (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    job_oid uuid references Jobs(oid),
    timestamp timestamp,
    message text,
    _pod_version int,
    _pod_serialization_version int,
    PRIMARY KEY (oid)
);

CREATE TABLE Resource_Getters (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    name text,
    PRIMARY KEY (oid)
);

CREATE TABLE Augmentations (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    name text,
    PRIMARY KEY (oid)
);

CREATE TABLE Jobs_Augs (
    oid uuid NOT NULL,
    job_oid uuid NOT NULL references Jobs(oid) ON DELETE CASCADE,
    augmentation_oid uuid NOT NULL references Augmentations(oid) ON DELETE CASCADE,
    PRIMARY KEY (oid)
);

CREATE TABLE Metrics (
    oid uuid NOT NULL,
    run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE,
    job_oid uuid references Jobs(oid),
    name text,
    value double precision,
    units text,
    lower_is_better boolean,
    _pod_version int,
    _pod_serialization_version int,
    PRIMARY KEY (oid)
);

CREATE TABLE LargeObjects (
    oid uuid NOT NULL,
    lo_oid lo NOT
NULL, PRIMARY KEY (oid) ); -- Trigger that allows you to manage large objects from the LO table directly; CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON LargeObjects FOR EACH ROW EXECUTE PROCEDURE lo_manage(lo_oid); CREATE TABLE Artifacts ( oid uuid NOT NULL, run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE, job_oid uuid references Jobs(oid), name text, large_object_uuid uuid NOT NULL references LargeObjects(oid), description text, kind text, _pod_version int, _pod_serialization_version int, is_dir boolean, PRIMARY KEY (oid) ); CREATE RULE del_lo AS ON DELETE TO Artifacts DO DELETE FROM LargeObjects WHERE LargeObjects.oid = old.large_object_uuid ; CREATE TABLE Classifiers ( oid uuid NOT NULL, artifact_oid uuid references Artifacts(oid) ON DELETE CASCADE, metric_oid uuid references Metrics(oid) ON DELETE CASCADE, job_oid uuid references Jobs(oid) ON DELETE CASCADE, run_oid uuid references Runs(oid) ON DELETE CASCADE, key text, value text, PRIMARY KEY (oid) ); CREATE TABLE Parameters ( oid uuid NOT NULL, run_oid uuid NOT NULL references Runs(oid) ON DELETE CASCADE, job_oid uuid references Jobs(oid), augmentation_oid uuid references Augmentations(oid), resource_getter_oid uuid references Resource_Getters(oid), name text, value text, value_type text, type param_enum, PRIMARY KEY (oid) ); ================================================ FILE: wa/commands/postgres_schemas/postgres_schema_update_v1.2.sql ================================================ ALTER TABLE resourcegetters RENAME TO resource_getters; ALTER TABLE classifiers ADD COLUMN job_oid uuid references Jobs(oid); ALTER TABLE classifiers ADD COLUMN run_oid uuid references Runs(oid); ALTER TABLE targets ADD COLUMN page_size_kb int; ALTER TABLE targets ADD COLUMN screen_resolution int[]; ALTER TABLE targets ADD COLUMN prop text; ALTER TABLE targets ADD COLUMN android_id text; ALTER TABLE targets ADD COLUMN _pod_version int; ALTER TABLE targets ADD COLUMN _pod_serialization_version int; ALTER TABLE jobs RENAME COLUMN retries TO retry; ALTER TABLE jobs ADD COLUMN _pod_version int; ALTER TABLE jobs ADD COLUMN _pod_serialization_version int; ALTER TABLE runs ADD COLUMN project_stage text; ALTER TABLE runs ADD COLUMN state jsonb; ALTER TABLE runs ADD COLUMN duration float; ALTER TABLE runs ADD COLUMN _pod_version int; ALTER TABLE runs ADD COLUMN _pod_serialization_version int; ALTER TABLE artifacts ADD COLUMN _pod_version int; ALTER TABLE artifacts ADD COLUMN _pod_serialization_version int; ALTER TABLE events ADD COLUMN _pod_version int; ALTER TABLE events ADD COLUMN _pod_serialization_version int; ALTER TABLE metrics ADD COLUMN _pod_version int; ALTER TABLE metrics ADD COLUMN _pod_serialization_version int; ================================================ FILE: wa/commands/postgres_schemas/postgres_schema_update_v1.3.sql ================================================ ALTER TABLE targets ADD COLUMN system_id text; ALTER TABLE artifacts ADD COLUMN is_dir boolean; ================================================ FILE: wa/commands/postgres_schemas/postgres_schema_update_v1.4.sql ================================================ ALTER TABLE targets ADD COLUMN modules text[]; ================================================ FILE: wa/commands/postgres_schemas/postgres_schema_update_v1.5.sql ================================================ ALTER TABLE targets ALTER hostid TYPE BIGINT; ================================================ FILE: wa/commands/postgres_schemas/postgres_schema_update_v1.6.sql 
================================================ ALTER TABLE jobs DROP CONSTRAINT jobs_run_oid_fkey, ADD CONSTRAINT jobs_run_oid_fkey FOREIGN KEY (run_oid) REFERENCES runs(oid) ON DELETE CASCADE ; ALTER TABLE targets DROP CONSTRAINT targets_run_oid_fkey, ADD CONSTRAINT targets_run_oid_fkey FOREIGN KEY (run_oid) REFERENCES runs(oid) ON DELETE CASCADE ; ALTER TABLE events DROP CONSTRAINT events_run_oid_fkey, ADD CONSTRAINT events_run_oid_fkey FOREIGN KEY (run_oid) REFERENCES runs(oid) ON DELETE CASCADE ; ALTER TABLE resource_getters DROP CONSTRAINT resource_getters_run_oid_fkey, ADD CONSTRAINT resource_getters_run_oid_fkey FOREIGN KEY (run_oid) REFERENCES runs(oid) ON DELETE CASCADE ; ALTER TABLE augmentations DROP CONSTRAINT augmentations_run_oid_fkey, ADD CONSTRAINT augmentations_run_oid_fkey FOREIGN KEY (run_oid) REFERENCES runs(oid) ON DELETE CASCADE ; ALTER TABLE jobs_augs DROP CONSTRAINT jobs_augs_job_oid_fkey, DROP CONSTRAINT jobs_augs_augmentation_oid_fkey, ADD CONSTRAINT jobs_augs_job_oid_fkey FOREIGN KEY (job_oid) REFERENCES Jobs(oid) ON DELETE CASCADE, ADD CONSTRAINT jobs_augs_augmentation_oid_fkey FOREIGN KEY (augmentation_oid) REFERENCES Augmentations(oid) ON DELETE CASCADE ; ALTER TABLE metrics DROP CONSTRAINT metrics_run_oid_fkey, ADD CONSTRAINT metrics_run_oid_fkey FOREIGN KEY (run_oid) REFERENCES runs(oid) ON DELETE CASCADE ; ALTER TABLE artifacts DROP CONSTRAINT artifacts_run_oid_fkey, ADD CONSTRAINT artifacts_run_oid_fkey FOREIGN KEY (run_oid) REFERENCES runs(oid) ON DELETE CASCADE ; CREATE RULE del_lo AS ON DELETE TO Artifacts DO DELETE FROM LargeObjects WHERE LargeObjects.oid = old.large_object_uuid ; ALTER TABLE classifiers DROP CONSTRAINT classifiers_artifact_oid_fkey, DROP CONSTRAINT classifiers_metric_oid_fkey, DROP CONSTRAINT classifiers_job_oid_fkey, DROP CONSTRAINT classifiers_run_oid_fkey, ADD CONSTRAINT classifiers_artifact_oid_fkey FOREIGN KEY (artifact_oid) REFERENCES artifacts(oid) ON DELETE CASCADE, ADD CONSTRAINT classifiers_metric_oid_fkey FOREIGN KEY (metric_oid) REFERENCES metrics(oid) ON DELETE CASCADE, ADD CONSTRAINT classifiers_job_oid_fkey FOREIGN KEY (job_oid) REFERENCES jobs(oid) ON DELETE CASCADE, ADD CONSTRAINT classifiers_run_oid_fkey FOREIGN KEY (run_oid) REFERENCES runs(oid) ON DELETE CASCADE ; ALTER TABLE parameters DROP CONSTRAINT parameters_run_oid_fkey, ADD CONSTRAINT parameters_run_oid_fkey FOREIGN KEY (run_oid) REFERENCES runs(oid) ON DELETE CASCADE ; ================================================ FILE: wa/commands/process.py ================================================ # Copyright 2014-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
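# Illustrative invocations of the 'wa process' command defined in this module
# (the paths and the 'csv' processor name are assumed example values):
#
#     wa process wa_output              # process a single run directory
#     wa process -r ~/results           # recurse into all runs found under ~/results
#     wa process -f -p csv wa_output    # force re-processing, adding an extra processor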
# import os from wa import Command from wa import discover_wa_outputs from wa.framework.configuration.core import Status from wa.framework.exception import CommandError from wa.framework.output import RunOutput from wa.framework.output_processor import ProcessorManager from wa.utils import log class ProcessContext(object): def __init__(self): self.run_output = None self.target_info = None self.job_output = None def add_augmentation(self, aug): pass class ProcessCommand(Command): name = 'process' description = 'Process the output from previously run workloads.' def initialize(self, context): self.parser.add_argument('directory', metavar='DIR', help=""" Specify a directory containing the data from a previous run to be processed. """) self.parser.add_argument('-p', '--processor', action='append', dest='additional_processors', metavar='OutputProcessor', help=""" Specify an output processor to add from the command line. This can be used to run a processor that is not normally used without introducing permanent change to the config (which one might then forget to revert). This option may be specified multiple times. """) self.parser.add_argument('-f', '--force', action='store_true', help=""" Run processors that have already been run. By default these will be skipped. Also, forces processing of in-progress runs. """) self.parser.add_argument('-r', '--recursive', action='store_true', help=""" Walk the specified directory to process all of the previous runs contained within instead of just processing the root. """) def execute(self, config, args): # pylint: disable=arguments-differ,too-many-branches,too-many-statements process_directory = os.path.expandvars(args.directory) self.logger.debug('Using process directory: {}'.format(process_directory)) if not os.path.exists(process_directory): msg = 'Path `{}` does not exist, please specify a valid path.' raise CommandError(msg.format(process_directory)) if not args.recursive: output_list = [RunOutput(process_directory)] else: output_list = list(discover_wa_outputs(process_directory)) pc = ProcessContext() for run_output in output_list: if run_output.status < Status.OK and not args.force: msg = 'Skipping {} as it has not completed -- {}' self.logger.info(msg.format(run_output.basepath, run_output.status)) continue pc.run_output = run_output pc.target_info = run_output.target_info if not args.recursive: self.logger.info('Installing output processors') else: self.logger.info('Install output processors for run in path `{}`' .format(run_output.basepath)) logfile = os.path.join(run_output.basepath, 'process.log') i = 0 while os.path.exists(logfile): i += 1 logfile = os.path.join(run_output.basepath, 'process-{}.log'.format(i)) log.add_file(logfile) pm = ProcessorManager(loader=config.plugin_cache) for proc in config.get_processors(): pm.install(proc, pc) if args.additional_processors: for proc in args.additional_processors: # Do not add any processors that are already present since # duplicate entries do not get disabled. 
try:
                        pm.get_output_processor(proc)
                    except ValueError:
                        pm.install(proc, pc)
            pm.validate()
            pm.initialize(pc)

            for job_output in run_output.jobs:
                if job_output.status < Status.OK or job_output.status in [Status.SKIPPED, Status.ABORTED]:
                    msg = 'Skipping job {} {} iteration {} -- {}'
                    self.logger.info(msg.format(job_output.id, job_output.label,
                                                job_output.iteration, job_output.status))
                    continue

                pc.job_output = job_output
                pm.enable_all()
                if not args.force:
                    for augmentation in job_output.spec.augmentations:
                        try:
                            pm.disable(augmentation)
                        except ValueError:
                            pass

                msg = 'Processing job {} {} iteration {}'
                self.logger.info(msg.format(job_output.id, job_output.label,
                                            job_output.iteration))
                pm.process_job_output(pc)
                pm.export_job_output(pc)
                job_output.write_result()

            pm.enable_all()
            if not args.force:
                for augmentation in run_output.augmentations:
                    try:
                        pm.disable(augmentation)
                    except ValueError:
                        pass

            self.logger.info('Processing run')
            pm.process_run_output(pc)
            pm.export_run_output(pc)
            pm.finalize(pc)

            run_output.write_info()
            run_output.write_result()
        self.logger.info('Done.')


================================================
FILE: wa/commands/report.py
================================================
from collections import Counter
from datetime import datetime, timedelta
import logging
import os

from wa import Command, settings
from wa.framework.configuration.core import Status
from wa.framework.output import RunOutput, discover_wa_outputs
from wa.utils.doc import underline
from wa.utils.log import COLOR_MAP, RESET_COLOR
from wa.utils.terminalsize import get_terminal_size


class ReportCommand(Command):

    name = 'report'
    description = '''
    Monitor an ongoing run and provide information on its progress.

    Specify the output directory of the run you would like to monitor;
    alternatively report will attempt to discover wa output directories
    within the current directory.

    The output includes run information such as the UUID, start time, duration,
    project name and a short summary of the run's progress (number of completed
    jobs, the number of jobs in each different status).

    If verbose output is specified, the output includes a list of all events
    labelled as not specific to any job, followed by a list of the jobs in the
    order executed, with their retries (if any), current status and, if the job
    is finished, a list of events that occurred during that job's execution.

    This is an example of a job status line:

        wk1 (exoplayer) [1] - 2, PARTIAL

    It contains two entries delimited by a comma: the job's descriptor followed
    by its completion status (``PARTIAL``, in this case). The descriptor
    consists of the following elements:

        - the job ID (``wk1``)
        - the job label (which defaults to the workload name) in parentheses
        - job iteration number in square brackets (``1`` in this case)
        - a hyphen followed by the retry attempt number.
          (note: this will only be shown if the job has been retried at least
          once. If the job has not yet run, or if it completed on the first
          attempt, the hyphen and retry count -- which in that case would be
          zero -- will not appear).
    '''

    def initialize(self, context):
        self.parser.add_argument('-d', '--directory',
                                 help='''
                                 Specify the WA output path. report will
                                 otherwise attempt to discover output
                                 directories in the current directory.
                                 ''')

    def execute(self, state, args):
        if args.directory:
            output_path = args.directory
            run_output = RunOutput(output_path)
        else:
            possible_outputs = list(discover_wa_outputs(os.getcwd()))
            num_paths = len(possible_outputs)
            if num_paths > 1:
                print('More than one possible output directory found,'
                      ' please choose a path from the following:')
                for i in range(num_paths):
                    print("{}: {}".format(i, possible_outputs[i].basepath))

                while True:
                    try:
                        select = int(input())
                    except ValueError:
                        print("Please select a valid path number")
                        continue

                    if select not in range(num_paths):
                        print("Please select a valid path number")
                        continue
                    break

                run_output = possible_outputs[select]
            else:
                run_output = possible_outputs[0]

        rm = RunMonitor(run_output)
        print(rm.generate_output(args.verbose))


class RunMonitor:

    @property
    def elapsed_time(self):
        if self._elapsed is None:
            if self.ro.info.duration is None:
                self._elapsed = datetime.utcnow() - self.ro.info.start_time
            else:
                self._elapsed = self.ro.info.duration
        return self._elapsed

    @property
    def job_outputs(self):
        if self._job_outputs is None:
            self._job_outputs = {
                (j_o.id, j_o.label, j_o.iteration): j_o for j_o in self.ro.jobs
            }
        return self._job_outputs

    @property
    def projected_duration(self):
        elapsed = self.elapsed_time.total_seconds()
        proj = timedelta(seconds=elapsed * (len(self.jobs) / len(self.segmented['finished'])))
        return proj - self.elapsed_time

    def __init__(self, ro):
        self.ro = ro
        self._elapsed = None
        self._p_duration = None
        self._job_outputs = None
        self._termwidth = None
        self._fmt = _simple_formatter()
        self.get_data()

    def get_data(self):
        self.jobs = [state for label_id, state in self.ro.state.jobs.items()]
        if self.jobs:
            rc = self.ro.run_config
            self.segmented = segment_jobs_by_state(self.jobs,
                                                   rc.max_retries,
                                                   rc.retry_on_status)

    def generate_run_header(self):
        info = self.ro.info
        header = underline('Run Info')
        header += "UUID: {}\n".format(info.uuid)
        if info.run_name:
            header += "Run name: {}\n".format(info.run_name)
        if info.project:
            header += "Project: {}\n".format(info.project)
        if info.project_stage:
            header += "Project stage: {}\n".format(info.project_stage)

        if info.start_time:
            duration = _seconds_as_smh(self.elapsed_time.total_seconds())
            header += ("Start time: {}\n"
                       "Duration: {:02}:{:02}:{:02}\n"
                       ).format(info.start_time,
                                duration[2], duration[1], duration[0])
            if self.segmented['finished'] and not info.end_time:
                p_duration = _seconds_as_smh(self.projected_duration.total_seconds())
                header += "Projected time remaining: {:02}:{:02}:{:02}\n".format(
                    p_duration[2], p_duration[1], p_duration[0])
            elif self.ro.info.end_time:
                header += "End time: {}\n".format(info.end_time)

        return header + '\n'

    def generate_job_summary(self):
        total = len(self.jobs)
        num_fin = len(self.segmented['finished'])

        summary = underline('Job Summary')
        summary += 'Total: {}, Completed: {} ({}%)\n'.format(
            total, num_fin, (num_fin / total) * 100
        ) if total > 0 else 'No jobs created\n'

        ctr = Counter()
        for run_state, jobs in ((k, v) for k, v in self.segmented.items() if v):
            if run_state == 'finished':
                ctr.update([job.status.name.lower() for job in jobs])
            else:
                ctr[run_state] += len(jobs)

        return summary + ', '.join(
            [str(count) + ' ' + self._fmt.highlight_keyword(status)
             for status, count in ctr.items()]
        ) + '\n\n'

    def generate_job_detail(self):
        detail = underline('Job Detail')
        for job in self.jobs:
            detail += ('{} ({}) [{}]{}, {}\n').format(
                job.id, job.label, job.iteration,
                ' - ' + str(job.retries) if job.retries else '',
                self._fmt.highlight_keyword(str(job.status)))

            job_output = self.job_outputs[(job.id, job.label, job.iteration)]
            for event in job_output.events:
                detail += self._fmt.fit_term_width(
                    '\t{}\n'.format(event.summary)
                )
        return detail

    def generate_run_detail(self):
        detail = underline('Run Events') if self.ro.events else ''

        for event in self.ro.events:
            detail += '{}\n'.format(event.summary)

        return detail + '\n'

    def generate_output(self, verbose):
        if not self.jobs:
            return 'No jobs found in output directory\n'

        output = self.generate_run_header()
        output += self.generate_job_summary()

        if verbose:
            output += self.generate_run_detail()
            output += self.generate_job_detail()

        return output


def _seconds_as_smh(seconds):
    seconds = int(seconds)
    hours = seconds // 3600
    minutes = (seconds % 3600) // 60
    seconds = seconds % 60
    return seconds, minutes, hours


def segment_jobs_by_state(jobstates, max_retries, retry_status):
    finished_states = [
        Status.PARTIAL, Status.FAILED, Status.ABORTED, Status.OK, Status.SKIPPED
    ]

    segmented = {
        'finished': [], 'other': [], 'running': [],
        'pending': [], 'uninitialized': []
    }

    for jobstate in jobstates:
        if (jobstate.status in retry_status) and jobstate.retries < max_retries:
            segmented['running'].append(jobstate)
        elif jobstate.status in finished_states:
            segmented['finished'].append(jobstate)
        elif jobstate.status == Status.RUNNING:
            segmented['running'].append(jobstate)
        elif jobstate.status == Status.PENDING:
            segmented['pending'].append(jobstate)
        elif jobstate.status == Status.NEW:
            segmented['uninitialized'].append(jobstate)
        else:
            segmented['other'].append(jobstate)

    return segmented


class _simple_formatter:

    color_map = {
        'running': COLOR_MAP[logging.INFO],
        'partial': COLOR_MAP[logging.WARNING],
        'failed': COLOR_MAP[logging.CRITICAL],
        'aborted': COLOR_MAP[logging.ERROR]
    }

    def __init__(self):
        self.termwidth = get_terminal_size()[0]
        self.color = settings.logging['color']

    def fit_term_width(self, text):
        text = text.expandtabs()
        if len(text) <= self.termwidth:
            return text
        else:
            return text[0:self.termwidth - 4] + " ...\n"

    def highlight_keyword(self, kw):
        if not self.color or kw not in _simple_formatter.color_map:
            return kw

        color = _simple_formatter.color_map[kw.lower()]
        return '{}{}{}'.format(color, kw, RESET_COLOR)


================================================
FILE: wa/commands/revent.py
================================================
# Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import sys
from time import sleep

from wa import Command
from wa.framework import pluginloader
from wa.framework.exception import ConfigError
from wa.framework.resource import ResourceResolver
from wa.framework.target.manager import TargetManager
from wa.utils.revent import ReventRecorder


class RecordCommand(Command):

    name = 'record'
    description = '''
    Performs a revent recording

    This command helps make revent recordings. It will automatically
    deploy revent and has options to automatically open apps and record
    specified stages of a workload.

    Revent allows you to record raw inputs such as screen swipes or button presses.
    This can be useful for recording inputs for workloads such as games that don't
    have XML UI layouts that can be used with UIAutomator. As a drawback, revent
    recordings are specific to the device type they were recorded on.

    WA uses two-part names for revent recordings, in the format
    {device_name}.{suffix}.revent.

     - device_name can either be specified manually with the ``-d`` argument or
       it can be automatically determined. On Android devices it will be obtained
       from ``build.prop``, on Linux devices it is obtained from
       ``/proc/device-tree/model``.
     - suffix is used by WA to determine which part of the app execution the
       recording is for, currently these are either ``setup``, ``run``,
       ``extract_results`` or ``teardown``. All stages are optional for recording
       and these should be specified with the ``-s``, ``-r``, ``-e`` or ``-t``
       arguments respectively, or optionally ``-a`` to indicate all stages should
       be recorded.
    '''

    def __init__(self, **kwargs):
        super(RecordCommand, self).__init__(**kwargs)
        self.tm = None
        self.target = None
        self.revent_recorder = None

    def initialize(self, context):
        self.parser.add_argument('-d', '--device', metavar='DEVICE',
                                 help='''
                                 Specify the device on which to run. This will take
                                 precedence over the device (if any) specified in
                                 configuration.
                                 ''')
        self.parser.add_argument('-o', '--output', help='Specify the output file',
                                 metavar='FILE')
        self.parser.add_argument('-s', '--setup', help='Record a recording for setup stage',
                                 action='store_true')
        self.parser.add_argument('-r', '--run', help='Record a recording for run stage',
                                 action='store_true')
        self.parser.add_argument('-e', '--extract_results',
                                 help='Record a recording for extract_results stage',
                                 action='store_true')
        self.parser.add_argument('-t', '--teardown', help='Record a recording for teardown stage',
                                 action='store_true')
        self.parser.add_argument('-a', '--all', help='Record recordings for available stages',
                                 action='store_true')

        # Need validation
        self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it',
                                 action='store_true')

        group = self.parser.add_mutually_exclusive_group(required=False)
        group.add_argument('-p', '--package', help='Android package to launch before recording')
        group.add_argument('-w', '--workload', help='Name of a revent workload (mostly games)')

    def validate_args(self, args):
        if args.clear and not (args.package or args.workload):
            self.logger.error("Package/Workload must be specified if you want to clear cache")
            sys.exit()
        if args.workload and args.output:
            self.logger.error("Output file cannot be specified with Workload")
            sys.exit()
        if not args.workload and (args.setup or args.extract_results
                                  or args.teardown or args.all):
            self.logger.error("Cannot specify a recording stage without a Workload")
            sys.exit()
        if args.workload and not any([args.all, args.teardown, args.extract_results,
                                      args.run, args.setup]):
            self.logger.error("Please specify which workload stages you wish to record")
            sys.exit()

    def execute(self, state, args):
        self.validate_args(args)
        state.run_config.merge_device_config(state.plugin_cache)
        if args.device:
            device = args.device
            device_config = {}
        else:
            device = state.run_config.device
            device_config = state.run_config.device_config or {}

        if args.output:
            outdir = os.path.basename(args.output)
        else:
            outdir = os.getcwd()

        self.tm = TargetManager(device, device_config, outdir)
        self.tm.initialize()
        self.target = self.tm.target
        self.revent_recorder = ReventRecorder(self.target)
        self.revent_recorder.deploy()

        if args.workload:
            self.workload_record(args)
        elif
args.package: self.package_record(args) else: self.manual_record(args) self.revent_recorder.remove() def record(self, revent_file, name, output_path): msg = 'Press Enter when you are ready to record {}...' self.logger.info(msg.format(name)) input('') self.revent_recorder.start_record(revent_file) msg = 'Press Enter when you have finished recording {}...' self.logger.info(msg.format(name)) input('') self.revent_recorder.stop_record() if not os.path.isdir(output_path): os.makedirs(output_path) revent_file_name = self.target.path.basename(revent_file) host_path = os.path.join(output_path, revent_file_name) if os.path.exists(host_path): msg = 'Revent file \'{}\' already exists, overwrite? [y/n]' self.logger.info(msg.format(revent_file_name)) if input('') == 'y': os.remove(host_path) else: msg = 'Did not pull and overwrite \'{}\'' self.logger.warning(msg.format(revent_file_name)) return msg = 'Pulling \'{}\' from device' self.logger.info(msg.format(self.target.path.basename(revent_file))) self.target.pull(revent_file, output_path, as_root=self.target.is_rooted) def manual_record(self, args): output_path, file_name = self._split_revent_location(args.output) revent_file = self.target.get_workpath(file_name) self.record(revent_file, '', output_path) msg = 'Recording is available at: \'{}\'' self.logger.info(msg.format(os.path.join(output_path, file_name))) def package_record(self, args): if self.target.os != 'android' and self.target.os != 'chromeos': raise ConfigError('Target does not appear to be running Android') if self.target.os == 'chromeos' and not self.target.supports_android: raise ConfigError('Target does not appear to support Android') if args.clear: self.target.execute('pm clear {}'.format(args.package)) self.logger.info('Starting {}'.format(args.package)) cmd = 'monkey -p {} -c android.intent.category.LAUNCHER 1' self.target.execute(cmd.format(args.package)) output_path, file_name = self._split_revent_location(args.output) revent_file = self.target.get_workpath(file_name) self.record(revent_file, '', output_path) msg = 'Recording is available at: \'{}\'' self.logger.info(msg.format(os.path.join(output_path, file_name))) def workload_record(self, args): context = LightContext(self.tm) setup_revent = '{}.setup.revent'.format(self.target.model) run_revent = '{}.run.revent'.format(self.target.model) extract_results_revent = '{}.extract_results.revent'.format(self.target.model) teardown_file_revent = '{}.teardown.revent'.format(self.target.model) setup_file = self.target.get_workpath(setup_revent) run_file = self.target.get_workpath(run_revent) extract_results_file = self.target.get_workpath(extract_results_revent) teardown_file = self.target.get_workpath(teardown_file_revent) self.logger.info('Deploying {}'.format(args.workload)) workload = pluginloader.get_workload(args.workload, self.target) # Setup apk if android workload if hasattr(workload, 'apk'): workload.apk.initialize(context) workload.apk.setup(context) sleep(workload.loading_time) output_path = os.path.join(workload.dependencies_directory, 'revent_files') if args.setup or args.all: self.record(setup_file, 'SETUP', output_path) if args.run or args.all: self.record(run_file, 'RUN', output_path) if args.extract_results or args.all: self.record(extract_results_file, 'EXTRACT_RESULTS', output_path) if args.teardown or args.all: self.record(teardown_file, 'TEARDOWN', output_path) self.logger.info('Tearing down {}'.format(args.workload)) workload.teardown(context) self.logger.info('Recording(s) are available at: 
\'{}\''.format(output_path)) def _split_revent_location(self, output): output_path = None file_name = None if output: output_path, file_name = os.path.split(output) if not file_name: file_name = '{}.revent'.format(self.target.model) if not output_path: output_path = os.getcwd() return output_path, file_name class ReplayCommand(Command): name = 'replay' description = ''' Replay a revent recording. Revent allows you to record raw inputs such as screen swipes or button presses. See ``wa show record`` for how to make a revent recording. ''' def initialize(self, context): self.parser.add_argument('recording', help='The name of the file to replay', metavar='FILE') self.parser.add_argument('-d', '--device', help='The name of the device') self.parser.add_argument('-p', '--package', help='Package to launch before replaying') self.parser.add_argument('-C', '--clear', help='Clear app cache before launching it', action="store_true") # pylint: disable=W0201 def execute(self, state, args): state.run_config.merge_device_config(state.plugin_cache) if args.device: device = args.device device_config = {} else: device = state.run_config.device device_config = state.run_config.device_config or {} target_manager = TargetManager(device, device_config, None) target_manager.initialize() self.target = target_manager.target revent_file = self.target.path.join(self.target.working_directory, os.path.split(args.recording)[1]) self.logger.info("Pushing file to target") self.target.push(args.recording, self.target.working_directory) revent_recorder = ReventRecorder(target_manager.target) revent_recorder.deploy() if args.clear: self.target.execute('pm clear {}'.format(args.package)) if args.package: self.logger.info('Starting {}'.format(args.package)) cmd = 'monkey -p {} -c android.intent.category.LAUNCHER 1' self.target.execute(cmd.format(args.package)) self.logger.info("Starting replay") revent_recorder.replay(revent_file) self.logger.info("Finished replay") revent_recorder.remove() # Used to satisfy the workload API class LightContext(object): def __init__(self, tm): self.tm = tm self.resolver = ResourceResolver() self.resolver.load() def get_resource(self, resource, strict=True): return self.resolver.get(resource, strict) def update_metadata(self, key, *args): pass get = get_resource ================================================ FILE: wa/commands/run.py ================================================ # Copyright 2014-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import sys import shutil import wa from wa import Command, settings from wa.framework import pluginloader from wa.framework.configuration.parsers import AgendaParser from wa.framework.execution import Executor from wa.framework.output import init_run_output from wa.framework.exception import NotFoundError, ConfigError from wa.utils import log from wa.utils.types import toggle_set class RunCommand(Command): name = 'run' description = ''' Execute automated workloads on a remote device and process the resulting output.
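A typical invocation (illustrative only: the agenda file and output directory names here are hypothetical, and ``trace-cmd`` is assumed to be the name of an installed instrument; the options shown are the ones defined in ``initialize`` below) might look like::

    wa run --output-directory my_results --disable trace-cmd my_agenda.yaml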
''' def initialize(self, context): self.parser.add_argument('agenda', metavar='AGENDA', help=""" Agenda for this workload automation run. This defines which workloads will be executed, how many times, with which tunables, etc. See the example agendas in {} for how this file should be structured. """.format(os.path.dirname(wa.__file__))) self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None, help=""" Specify a directory where the output will be generated. If the directory already exists, the script will abort unless the -f option (see below) is used, in which case the contents of the directory will be overwritten. If this option is not specified, then {} will be used instead. """.format(settings.default_output_directory)) self.parser.add_argument('-f', '--force', action='store_true', help=""" Overwrite the output directory if it exists. By default, the script will abort in this situation to prevent accidental data loss. """) self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID', help=""" Specify a workload spec ID from an agenda to run. If this is specified, only that particular spec will be run, and other workloads in the agenda will be ignored. This option may be used to specify multiple IDs. """) self.parser.add_argument('--disable', action='append', dest='augmentations_to_disable', default=[], metavar='INSTRUMENT', help=""" Specify an instrument or output processor to disable from the command line. This is equivalent to adding "~{metavar}" to the instruments list in the agenda. This can be used to temporarily disable a troublesome instrument for a particular run without introducing a permanent change to the config (which one might then forget to revert). This option may be specified multiple times. """) def execute(self, config, args): # pylint: disable=arguments-differ output = self.set_up_output_directory(config, args) log.add_file(output.logfile) output.add_artifact('runlog', output.logfile, kind='log', description='Run log.') disabled_augmentations = toggle_set(["~{}".format(i) if i != '~~' else i for i in args.augmentations_to_disable]) config.jobs_config.disable_augmentations(disabled_augmentations) config.jobs_config.only_run_ids(args.only_run_ids) parser = AgendaParser() if os.path.isfile(args.agenda): includes = parser.load_from_path(config, args.agenda) shutil.copy(args.agenda, output.raw_config_dir) for inc in includes: shutil.copy(inc, output.raw_config_dir) else: try: pluginloader.get_plugin_class(args.agenda, kind='workload') agenda = {'workloads': [{'name': args.agenda}]} parser.load(config, agenda, 'CMDLINE_ARGS') except NotFoundError: msg = 'Agenda file "{}" does not exist, and there is no workload '\ 'with that name.\nYou can get a list of the available workloads '\ 'by running "wa list workloads".'
raise ConfigError(msg.format(args.agenda)) # Update run info with newly parsed config values output.info.project = config.run_config.project output.info.project_stage = config.run_config.project_stage output.info.run_name = config.run_config.run_name executor = Executor() executor.execute(config, output) def set_up_output_directory(self, config, args): if args.output_directory: output_directory = args.output_directory else: output_directory = settings.default_output_directory self.logger.debug('Using output directory: {}'.format(output_directory)) try: return init_run_output(output_directory, config, args.force) except RuntimeError as e: if 'path exists' in str(e): msg = 'Output directory "{}" exists.\nPlease specify another '\ 'location, or use -f option to overwrite.' self.logger.critical(msg.format(output_directory)) sys.exit(1) else: raise e ================================================ FILE: wa/commands/schema_changelog.rst ================================================ # 1 ## 1.0 - First version ## 1.1 - LargeObjects table added as a substitute for the previous plan to use the filesystem and a path reference to store artifacts. This was done following an extended discussion and tests that verified that the savings in processing power were not enough to warrant the creation of a dedicated server or file handler. ## 1.2 - Rename the `resourcegetters` table to `resource_getters` for consistency. - Add Job and Run level classifiers. - Add missing android specific properties to targets. - Add new POD meta data to relevant tables. - Correct job column name from `retires` to `retry`. - Add missing run information. ## 1.3 - Add missing "system_id" field from TargetInfo. - Enable support for uploading Artifact that represent directories. ## 1.4 - Add "modules" field to TargetInfo to list the modules loaded by the target during the run. ## 1.5 - Change the type of the "hostid" in TargetInfo from Int to Bigint. ## 1.6 - Add cascading deletes to most tables to allow easy deletion of a run and its associated data - Add rule to delete associated large object on deletion of artifact ================================================ FILE: wa/commands/show.py ================================================ # Copyright 2014-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # TODO: because of some weirdness involving get_params_rst and underline # functions from wa.utils.doc, pylint gets stuck here for a very # long time. To avoid that, skip this file. 
# pylint: disable-all import sys import platform from subprocess import call, Popen, PIPE from devlib.utils.misc import escape_double_quotes from wa import Command from wa.framework import pluginloader from wa.framework.configuration.core import MetaConfiguration, RunConfiguration from wa.framework.exception import NotFoundError from wa.framework.target.descriptor import list_target_descriptions from wa.utils.types import caseless_string, identifier from wa.utils.doc import (strip_inlined_text, get_rst_from_plugin, get_params_rst, underline) from wa.utils.misc import which class ShowCommand(Command): name = 'show' description = 'Display documentation for the specified plugin (workload, instrument, etc.).' def initialize(self, context): self.parser.add_argument('plugin', metavar='PLUGIN', help='The name of the plugin to display documentation for.') def execute(self, state, args): name = identifier(args.plugin) rst_output = None if name == caseless_string('settings'): rst_output = get_rst_for_global_config() rst_output += get_rst_for_envars() plugin_name = name.lower() kind = 'global:' else: try: plugin = pluginloader.get_plugin_class(name) except NotFoundError: plugin = None if plugin: rst_output = get_rst_from_plugin(plugin) plugin_name = plugin.name kind = '{}:'.format(plugin.kind) else: target = get_target_description(name) if target: rst_output = get_rst_from_target(target) plugin_name = target.name kind = 'target:' if not rst_output: raise NotFoundError('Could not find plugin or alias "{}"'.format(name)) if which('pandoc'): if platform.system() == "Darwin": # The version of `man` shipped with macOS does not support `-l`. You need to use GNU man from: # https://formulae.brew.sh/formula/man-db if which("gman") is None: print(rst_output) man = "gman" else: man = "man" p = Popen(['pandoc', '-f', 'rst', '-t', 'man'], stdin=PIPE, stdout=PIPE, stderr=PIPE) output, _ = p.communicate(rst_output.encode(sys.stdin.encoding)) output = output.decode(sys.stdout.encoding) # Make sure to double escape back slashes output = output.replace('\\', '\\\\\\') # Correctly format the title and page number of the man page title, body = output.split('\n', 1) title = '.TH {}{} 7'.format(kind, plugin_name) output = '\n'.join([title, body]) call('echo "{}" | {} -l -'.format(escape_double_quotes(output), man), shell=True) else: print(rst_output) # pylint: disable=superfluous-parens def get_target_description(name): targets = list_target_descriptions() for target in targets: if name == identifier(target.name): return target def get_rst_from_target(target): text = underline(target.name, '~') if hasattr(target, 'description'): desc = strip_inlined_text(target.description or '') text += desc text += underline('Device Parameters:', '-') text += get_params_rst(target.conn_params) text += get_params_rst(target.platform_params) text += get_params_rst(target.target_params) text += get_params_rst(target.assistant_params) text += '.. 
Note: For available runtime parameters please see the documentation' return text + '\n' def get_rst_for_global_config(): text = underline('Global Configuration') text += 'These parameters control the behaviour of a WA run as a whole; they ' \ 'should be set inside a config file (either located in ' \ '$WA_USER_DIRECTORY/config.yaml or one which is specified with -c), ' \ 'or in the config/global section of the agenda.\n\n' cfg_points = MetaConfiguration.config_points + RunConfiguration.config_points text += get_params_rst(cfg_points) return text def get_rst_for_envars(): text = underline('Environment Variables') text += '''WA_USER_DIRECTORY: str This is the location WA will look for config.yaml, plugins, dependencies, and it will also be used for local caches, etc. If this variable is not set, the default location is ``~/.workload_automation`` (this is created when WA is installed). .. note:: This location must be writable by the user who runs WA.''' return text ================================================ FILE: wa/commands/templates/apk_workload ================================================ from wa import Parameter, ApkWorkload class ${class_name}(ApkWorkload): name = '${name}' description = "This is a placeholder description" # Replace with a list of supported package name(s) in the APK file(s). package_names = ['package_name'] parameters = [ # Workload parameters go here e.g. Parameter('example_parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False, description='This is an example parameter') ] def __init__(self, target, **kwargs): super(${class_name}, self).__init__(target, **kwargs) # Define any additional attributes required for the workload def init_resources(self, resolver): super(${class_name}, self).init_resources(resolver) # This method may be used to perform early resource discovery and # initialization. This is invoked during the initial loading stage and # before the device is ready, so cannot be used for any device-dependent # initialization. This method is invoked before the workload instance is # validated. def initialize(self, context): super(${class_name}, self).initialize(context) # This method should be used to perform once-per-run initialization of a # workload instance. def validate(self): super(${class_name}, self).validate() # Validate inter-parameter assumptions etc. def setup(self, context): super(${class_name}, self).setup(context) # Perform any necessary setup before starting the application def setup_rerun(self, context): super(${class_name}, self).setup(context) # If the workload has the `requires_rerun` attribute set to `True` this # method may be used to perform any necessary setup for the rerun of the # application. def extract_results(self, context): super(${class_name}, self).extract_results(context) # Extract results on the target def update_output(self, context): super(${class_name}, self).update_output(context) # Update the output within the specified execution context with the # metrics and artifacts from this workload iteration. def teardown(self, context): super(${class_name}, self).teardown(context) # Perform any final clean up for the Workload. ================================================ FILE: wa/commands/templates/apkrevent_workload ================================================ from wa import Parameter, ApkReventWorkload class ${class_name}(ApkReventWorkload): name = '${name}' description = "This is a placeholder description" # Replace with a list of supported package names in the APK file(s).
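# For example (hypothetical package name):
#
#   package_names = ['com.example.mygame']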
package_names = ['package_name'] parameters = [ # Workload parameters go here e.g. Parameter('example_parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False, description='This is an example parameter') ] def __init__(self, target, **kwargs): super(${class_name}, self).__init__(target, **kwargs) # Define any additional attributes required for the workload def init_resources(self, resolver): super(${class_name}, self).init_resources(resolver) # This method may be used to perform early resource discovery and # initialization. This is invoked during the initial loading stage and # before the device is ready, so cannot be used for any device-dependent # initialization. This method is invoked before the workload instance is # validated. def initialize(self, context): super(${class_name}, self).initialize(context) # This method should be used to perform once-per-run initialization of a # workload instance. def validate(self): super(${class_name}, self).validate() # Validate inter-parameter assumptions etc. def setup(self, context): super(${class_name}, self).setup(context) # Perform any necessary setup before starting the application def setup_rerun(self, context): super(${class_name}, self).setup(context) # If the workload has the `requires_rerun` attribute set to `True` this # method may be used to perform any necessary setup for the rerun of the # application. def extract_results(self, context): super(${class_name}, self).extract_results(context) # Extract results on the target def update_output(self, context): super(${class_name}, self).update_output(context) # Update the output within the specified execution context with the # metrics and artifacts from this workload iteration. def teardown(self, context): super(${class_name}, self).teardown(context) # Perform any final clean up for the Workload. ================================================ FILE: wa/commands/templates/apkuiauto_workload ================================================ from wa import Parameter, ApkUiautoWorkload class ${class_name}(ApkUiautoWorkload): name = '${name}' description = "This is a placeholder description" # Replace with a list of supported package names in the APK file(s). package_names = ['package_name'] parameters = [ # Workload parameters go here e.g. Parameter('example_parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False, description='This is an example parameter') ] def __init__(self, target, **kwargs): super(${class_name}, self).__init__(target, **kwargs) # Define any additional attributes required for the workload def init_resources(self, resolver): super(${class_name}, self).init_resources(resolver) # This method may be used to perform early resource discovery and # initialization. This is invoked during the initial loading stage and # before the device is ready, so cannot be used for any device-dependent # initialization. This method is invoked before the workload instance is # validated. def initialize(self, context): super(${class_name}, self).initialize(context) # This method should be used to perform once-per-run initialization of a # workload instance.
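# For example (an illustrative sketch only; the asset name below is made up),
# a large asset shared between iterations could be deployed here once per run,
# instead of in setup() for every iteration:
#
#   def initialize(self, context):
#       super(${class_name}, self).initialize(context)
#       self.target.push('large_asset.bin', self.target.working_directory)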
def validate(self): super(${class_name}, self).validate() # Validate inter-parameter assumptions etc. def setup(self, context): super(${class_name}, self).setup(context) # Perform any necessary setup before starting the UI automation def setup_rerun(self, context): super(${class_name}, self).setup(context) # If the workload has the `requires_rerun` attribute set to `True` this # method may be used to perform any necessary setup for the rerun of the # application. def extract_results(self, context): super(${class_name}, self).extract_results(context) # Extract results on the target def update_output(self, context): super(${class_name}, self).update_output(context) # Update the output within the specified execution context with the # metrics and artifacts from this workload iteration. def teardown(self, context): super(${class_name}, self).teardown(context) # Perform any final clean up for the Workload. ================================================ FILE: wa/commands/templates/basic_workload ================================================ from wa import Parameter, Workload class ${class_name}(Workload): name = '${name}' description = "This is a placeholder description" parameters = [ # Workload parameters go here e.g. Parameter('example_parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False, description='This is an example parameter') ] def __init__(self, target, **kwargs): super(${class_name}, self).__init__(target, **kwargs) # Define any additional attributes required for the workload def init_resources(self, resolver): super(${class_name}, self).init_resources(resolver) # This method may be used to perform early resource discovery and # initialization. This is invoked during the initial loading stage and # before the device is ready, so cannot be used for any device-dependent # initialization. This method is invoked before the workload instance is # validated. def initialize(self, context): super(${class_name}, self).initialize(context) # This method should be used to perform once-per-run initialization of a # workload instance. def validate(self): super(${class_name}, self).validate() # Validate inter-parameter assumptions etc. def setup(self, context): super(${class_name}, self).setup(context) # Perform any necessary setup before starting the workload def run(self, context): super(${class_name}, self).run(context) # Perform the main functionality of the workload def extract_results(self, context): super(${class_name}, self).extract_results(context) # Extract results on the target def update_output(self, context): super(${class_name}, self).update_output(context) # Update the output within the specified execution context with the # metrics and artifacts from this workload iteration. def teardown(self, context): super(${class_name}, self).teardown(context) # Perform any final clean up for the Workload. ================================================ FILE: wa/commands/templates/revent_workload ================================================ from wa import Parameter, ReventWorkload class ${class_name}(ReventWorkload): name = '${name}' description = "This is a placeholder description" parameters = [ # Workload parameters go here e.g.
Parameter('example_parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False, description='This is an example parameter') ] def __init__(self, target, **kwargs): super(${class_name}, self).__init__(target, **kwargs) # Define any additional attributes required for the workload def init_resources(self, resolver): super(${class_name}, self).init_resources(resolver) # This method may be used to perform early resource discovery and # initialization. This is invoked during the initial loading stage and # before the device is ready, so cannot be used for any device-dependent # initialization. This method is invoked before the workload instance is # validated. def initialize(self, context): super(${class_name}, self).initialize(context) # This method should be used to perform once-per-run initialization of a # workload instance. def validate(self): super(${class_name}, self).validate() # Validate inter-parameter assumptions etc. def setup(self, context): super(${class_name}, self).setup(context) # Perform any necessary setup before starting the workload def run(self, context): super(${class_name}, self).run(context) # Perform the main functionality of the workload def extract_results(self, context): super(${class_name}, self).extract_results(context) # Extract results on the target def update_output(self, context): super(${class_name}, self).update_output(context) # Update the output within the specified execution context with the # metrics and artifacts from this workload iteration. def teardown(self, context): super(${class_name}, self).teardown(context) # Perform any final clean up for the Workload. ================================================ FILE: wa/commands/templates/setup.template ================================================ import os import sys import warnings from multiprocessing import Process try: from setuptools.command.install import install as orig_install from setuptools import setup except ImportError: from distutils.command.install import install as orig_install from distutils.core import setup try: import pwd except ImportError: pwd = None warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'") try: os.remove('MANIFEST') except OSError: pass packages = [] data_files = {} source_dir = os.path.dirname(__file__) for root, dirs, files in os.walk('$package_name'): rel_dir = os.path.relpath(root, source_dir) data = [] if '__init__.py' in files: for f in files: if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']: data.append(f) package_name = rel_dir.replace(os.sep, '.') package_dir = root packages.append(package_name) data_files[package_name] = data else: # use previous package name filepaths = [os.path.join(root, f) for f in files] data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths]) params = dict( name='$package_name', version='0.0.1', packages=packages, package_data=data_files, url='N/A', maintainer='$user', maintainer_email='$user@example.com', install_requires=[ 'wa', ], # https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'License :: Other/Proprietary License', 'Operating System :: Unix', 'Programming Language :: Python :: 3', ], ) def update_wa_packages(): sudo_user = os.getenv('SUDO_USER') if sudo_user: user_entry = pwd.getpwnam(sudo_user) os.setgid(user_entry.pw_gid) os.setuid(user_entry.pw_uid) env_root = os.getenv('WA_USER_DIRECTORY', os.path.join(os.path.expanduser('~'),
'.workload_automation')) if not os.path.isdir(env_root): os.makedirs(env_root) wa_packages_file = os.path.join(env_root, 'packages') if os.path.isfile(wa_packages_file): with open(wa_packages_file, 'r') as wfh: package_list = wfh.read().split() if params['name'] not in package_list: package_list.append(params['name']) else: # no existing package file package_list = [params['name']] with open(wa_packages_file, 'w') as wfh: wfh.write('\n'.join(package_list)) class install(orig_install): def run(self): orig_install.run(self) # Must be done in a separate process, because update_wa_packages will drop # privileges if running under sudo, and won't be able to reacquire them. p = Process(target=update_wa_packages) p.start() p.join() params['cmdclass'] = {'install': install} setup(**params) ================================================ FILE: wa/commands/templates/uiauto/UiAutomation.java ================================================ package ${package_name}; import android.app.Activity; import android.os.Bundle; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import android.support.test.runner.AndroidJUnit4; import android.util.Log; import android.view.KeyEvent; // Import the uiautomator libraries import android.support.test.uiautomator.UiObject; import android.support.test.uiautomator.UiObjectNotFoundException; import android.support.test.uiautomator.UiScrollable; import android.support.test.uiautomator.UiSelector; import com.arm.wa.uiauto.BaseUiAutomation; @RunWith(AndroidJUnit4.class) public class UiAutomation extends BaseUiAutomation { protected Bundle parameters; protected String packageID; protected int example_parameter; public static String TAG = "${name}"; @Before public void initialize() throws Exception { // Perform any parameter initialization here parameters = getParams(); packageID = getPackageID(parameters); example_parameter = parameters.getInt("example_parameter"); } @Test public void setup() throws Exception { // Optional: Perform any setup required before the main workload // is run, e.g.
dismissing welcome screens } @Test public void runWorkload() throws Exception { // The main UI Automation code goes here } @Test public void extractResults() throws Exception { // Optional: Extract any relevant results from the workload } @Test public void teardown() throws Exception { // Optional: Perform any clean up for the workload } } ================================================ FILE: wa/commands/templates/uiauto/uiauto_AndroidManifest.xml ================================================ ================================================ FILE: wa/commands/templates/uiauto/uiauto_build.gradle ================================================ apply plugin: 'com.android.application' android { compileSdkVersion 28 buildToolsVersion '28.0.0' defaultConfig { applicationId "${package_name}" minSdkVersion 18 targetSdkVersion 28 testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner" } buildTypes { applicationVariants.all { variant -> variant.outputs.each { output -> output.outputFileName = "${package_name}.apk" } } } } dependencies { compile fileTree(include: ['*.jar'], dir: 'libs') compile 'com.android.support.test:runner:0.5' compile 'com.android.support.test:rules:0.5' compile 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2' compile(name: 'uiauto', ext: 'aar') } repositories { flatDir { dirs 'libs' } } ================================================ FILE: wa/commands/templates/uiauto/uiauto_build_script ================================================ #!/bin/bash # CD into build dir if possible - allows building from any directory script_path='.' if `readlink -f $$0 &>/dev/null`; then script_path=`readlink -f $$0 2>/dev/null` fi script_dir=`dirname $$script_path` cd $$script_dir # Ensure gradlew exists before starting if [[ ! -f gradlew ]]; then echo 'gradlew file not found! Check that you are in the right directory.' exit 9 fi # Copy base class library from wlauto dist libs_dir=app/libs base_class=`python3 -c "import os, wa; print(os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar'))"` mkdir -p $$libs_dir cp $$base_class $$libs_dir # Build and return appropriate exit code if failed # gradle build ./gradlew clean :app:assembleDebug exit_code=$$? if [[ $$exit_code -ne 0 ]]; then echo "ERROR: 'gradle build' exited with code $$exit_code" exit $$exit_code fi # If successful move APK file to workload folder (overwrite previous) rm -f ../$package_name.apk if [[ -f app/build/outputs/apk/debug/$package_name.apk ]]; then cp app/build/outputs/apk/debug/$package_name.apk ../$package_name.apk else echo 'ERROR: UiAutomator apk could not be found!' exit 9 fi ================================================ FILE: wa/commands/templates/uiauto/uiauto_workload_template/build.gradle ================================================ // Top-level build file where you can add configuration options common to all sub-projects/modules.
buildscript { repositories { jcenter() google() } dependencies { classpath 'com.android.tools.build:gradle:7.2.1' // NOTE: Do not place your application dependencies here; they belong // in the individual module build.gradle files } } allprojects { repositories { jcenter() google() } } task clean(type: Delete) { delete rootProject.buildDir } ================================================ FILE: wa/commands/templates/uiauto/uiauto_workload_template/gradle/wrapper/gradle-wrapper.properties ================================================ #Wed May 03 15:42:44 BST 2017 distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip ================================================ FILE: wa/commands/templates/uiauto/uiauto_workload_template/gradlew ================================================ #!/usr/bin/env bash ############################################################################## ## ## Gradle start up script for UN*X ## ############################################################################## # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS="" APP_NAME="Gradle" APP_BASE_NAME=`basename "$0"` # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD="maximum" warn ( ) { echo "$*" } die ( ) { echo echo "$*" echo exit 1 } # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false case "`uname`" in CYGWIN* ) cygwin=true ;; Darwin* ) darwin=true ;; MINGW* ) msys=true ;; esac # Attempt to set APP_HOME # Resolve links: $0 may be a link PRG="$0" # Need this for relative symlinks. while [ -h "$PRG" ] ; do ls=`ls -ld "$PRG"` link=`expr "$ls" : '.*-> \(.*\)$'` if expr "$link" : '/.*' > /dev/null; then PRG="$link" else PRG=`dirname "$PRG"`"/$link" fi done SAVED="`pwd`" cd "`dirname \"$PRG\"`/" >/dev/null APP_HOME="`pwd -P`" cd "$SAVED" >/dev/null CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar # Determine the Java command to use to start the JVM. if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables JAVACMD="$JAVA_HOME/jre/sh/java" else JAVACMD="$JAVA_HOME/bin/java" fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else JAVACMD="java" which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi # Increase the maximum file descriptors if we can. if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then MAX_FD_LIMIT=`ulimit -H -n` if [ $? -eq 0 ] ; then if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then MAX_FD="$MAX_FD_LIMIT" fi ulimit -n $MAX_FD if [ $? 
-ne 0 ] ; then warn "Could not set maximum file descriptor limit: $MAX_FD" fi else warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" fi fi # For Darwin, add options to specify how the application appears in the dock if $darwin; then GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" fi # For Cygwin, switch paths to Windows format before running java if $cygwin ; then APP_HOME=`cygpath --path --mixed "$APP_HOME"` CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` JAVACMD=`cygpath --unix "$JAVACMD"` # We build the pattern for arguments to be converted via cygpath ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` SEP="" for dir in $ROOTDIRSRAW ; do ROOTDIRS="$ROOTDIRS$SEP$dir" SEP="|" done OURCYGPATTERN="(^($ROOTDIRS))" # Add a user-defined pattern to the cygpath arguments if [ "$GRADLE_CYGPATTERN" != "" ] ; then OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" fi # Now convert the arguments - kludge to limit ourselves to /bin/sh i=0 for arg in "$@" ; do CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` else eval `echo args$i`="\"$arg\"" fi i=$((i+1)) done case $i in (0) set -- ;; (1) set -- "$args0" ;; (2) set -- "$args0" "$args1" ;; (3) set -- "$args0" "$args1" "$args2" ;; (4) set -- "$args0" "$args1" "$args2" "$args3" ;; (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; esac fi # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules function splitJvmOpts() { JVM_OPTS=("$@") } eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" ================================================ FILE: wa/commands/templates/uiauto/uiauto_workload_template/gradlew.bat ================================================ @if "%DEBUG%" == "" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @rem @rem ########################################################################## @rem Set local scope for the variables with windows NT shell if "%OS%"=="Windows_NT" setlocal @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. set DEFAULT_JVM_OPTS= set DIRNAME=%~dp0 if "%DIRNAME%" == "" set DIRNAME=. set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 if "%ERRORLEVEL%" == "0" goto init echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :findJavaFromJavaHome set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe if exist "%JAVA_EXE%" goto init echo. 
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :init @rem Get command-line arguments, handling Windowz variants if not "%OS%" == "Windows_NT" goto win9xME_args if "%@eval[2+2]" == "4" goto 4NT_args :win9xME_args @rem Slurp the command line arguments. set CMD_LINE_ARGS= set _SKIP=2 :win9xME_args_slurp if "x%~1" == "x" goto execute set CMD_LINE_ARGS=%* goto execute :4NT_args @rem Get arguments from the 4NT Shell from JP Software set CMD_LINE_ARGS=%$ :execute @rem Setup the command line set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar @rem Execute Gradle "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% :end @rem End local scope for the variables with windows NT shell if "%ERRORLEVEL%"=="0" goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 exit /b 1 :mainEnd if "%OS%"=="Windows_NT" endlocal :omega ================================================ FILE: wa/commands/templates/uiauto/uiauto_workload_template/settings.gradle ================================================ include ':app' ================================================ FILE: wa/commands/templates/uiauto_workload ================================================ from wa import Parameter, UiautoWorkload class ${class_name}(UiautoWorkload): name = '${name}' description = "This is a placeholder description" # Replace with a list of supported package names from the APK file(s) package_names = ['package_name'] parameters = [ # Workload parameters go here e.g. Parameter('example_parameter', kind=int, allowed_values=[1,2,3], default=1, override=True, mandatory=False, description='This is an example parameter') ] def __init__(self, target, **kwargs): super(${class_name}, self).__init__(target, **kwargs) # Define any additional attributes required for the workload def init_resources(self, resolver): super(${class_name}, self).init_resources(resolver) # This method may be used to perform early resource discovery and # initialization. This is invoked during the initial loading stage and # before the device is ready, so cannot be used for any device-dependent # initialization. This method is invoked before the workload instance is # validated. def initialize(self, context): super(${class_name}, self).initialize(context) # This method should be used to perform once-per-run initialization of a # workload instance. def validate(self): super(${class_name}, self).validate() # Validate inter-parameter assumptions etc. def setup(self, context): super(${class_name}, self).setup(context) # Perform any necessary setup before starting the UI automation def run(self, context): super(${class_name}, self).run(context) # Perform the main functionality of the workload def extract_results(self, context): super(${class_name}, self).extract_results(context) # Extract results on the target def update_output(self, context): super(${class_name}, self).update_output(context) # Update the output within the specified execution context with the # metrics and artifacts from this workload iteration. def teardown(self, context): super(${class_name}, self).teardown(context) # Perform any final clean up for the Workload.
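To illustrate how these templates are meant to be filled in, here is a minimal concrete workload derived from the basic template above (a sketch only: the class name, workload name and parameter are hypothetical and not part of this repository):

from wa import Parameter, Workload

class ExampleSleep(Workload):

    name = 'example_sleep'
    description = "Illustrative workload that simply idles on the target."

    parameters = [
        Parameter('duration', kind=int, default=5,
                  description='Time, in seconds, to remain idle.'),
    ]

    def run(self, context):
        super(ExampleSleep, self).run(context)
        # Parameter values are set as attributes on the workload instance,
        # so the 'duration' parameter declared above is available here.
        self.target.execute('sleep {}'.format(self.duration))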
================================================ FILE: wa/framework/__init__.py ================================================ ================================================ FILE: wa/framework/command.py ================================================ # Copyright 2014-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import textwrap from wa.framework.exception import CommandError from wa.framework.plugin import Plugin from wa.framework.version import get_wa_version from wa.utils.doc import format_body def init_argument_parser(parser): parser.add_argument('-c', '--config', action='append', default=[], help='specify an additional config.yaml') parser.add_argument('-v', '--verbose', action='count', help='The scripts will produce verbose output.') parser.add_argument('--version', action='version', version='%(prog)s {}'.format(get_wa_version())) return parser class SubCommand(object): """ Defines a Workload Automation command. This will be executed from the command line as ``wa <command> [args ...]``. This defines the name to be used when invoking wa, the code that will actually be executed on invocation and the argument parser to be used to parse the rest of the command line arguments. """ name = None help = None usage = None description = None epilog = None formatter_class = None def __init__(self, logger, subparsers): self.logger = logger self.group = subparsers desc = format_body(textwrap.dedent(self.description), 80) parser_params = dict(help=(self.help or self.description), usage=self.usage, description=desc, epilog=self.epilog) if self.formatter_class: parser_params['formatter_class'] = self.formatter_class self.parser = subparsers.add_parser(self.name, **parser_params) init_argument_parser(self.parser) # propagate top-level options self.initialize(None) def initialize(self, context): """ Perform command-specific initialisation (e.g. adding command-specific options to the command's parser). ``context`` is always ``None``. """ def execute(self, state, args): """ Execute this command. :state: An initialized ``ConfigManager`` that contains the current state of WA execution up to that point (processed configuration, loaded plugins, etc). :args: An ``argparse.Namespace`` containing command line arguments (as returned by ``argparse.ArgumentParser.parse_args()``). This would usually be the result of invoking ``self.parser``. """ raise NotImplementedError() class Command(Plugin, SubCommand): # pylint: disable=abstract-method """ Defines a Workload Automation command. This will be executed from the command line as ``wa <command> [args ...]``. This defines the name to be used when invoking wa, the code that will actually be executed on invocation and the argument parser to be used to parse the rest of the command line arguments. """ kind = "command" def __init__(self, subparsers): Plugin.__init__(self) SubCommand.__init__(self, self.logger, subparsers) class ComplexCommand(Command): """ A command that defines sub-commands.
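Concrete subclasses list their sub-command classes in ``subcmd_classes``; ``initialize`` below instantiates each of them against an ``argparse`` sub-parser, and ``execute`` dispatches to whichever sub-command was named on the command line.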
""" subcmd_classes = [] def __init__(self, subparsers): self.subcommands = [] super(ComplexCommand, self).__init__(subparsers) def initialize(self, context): subparsers = self.parser.add_subparsers(dest='what', metavar='SUBCMD') subparsers.required = True for subcmd_cls in self.subcmd_classes: subcmd = subcmd_cls(self.logger, subparsers) self.subcommands.append(subcmd) def execute(self, state, args): for subcmd in self.subcommands: if subcmd.name == args.what: subcmd.execute(state, args) break else: raise CommandError('Not a valid create parameter: {}'.format(args.name)) ================================================ FILE: wa/framework/configuration/__init__.py ================================================ # Copyright 2013-2017 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from wa.framework.configuration.core import (settings, RunConfiguration, JobGenerator, ConfigurationPoint) ================================================ FILE: wa/framework/configuration/core.py ================================================ # Copyright 2014-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import logging from copy import copy, deepcopy from collections import OrderedDict, defaultdict from wa.framework.exception import ConfigError, NotFoundError from wa.framework.configuration.tree import SectionNode from wa.utils import log from wa.utils.misc import (get_article, merge_config_values) from wa.utils.types import (identifier, integer, boolean, list_of_strings, list_of, toggle_set, obj_dict, enum) from wa.utils.serializer import is_pod, Podable # Mapping for kind conversion; see docs for convert_types below KIND_MAP = { int: integer, bool: boolean, dict: OrderedDict, } Status = enum(['UNKNOWN', 'NEW', 'PENDING', 'STARTED', 'CONNECTED', 'INITIALIZED', 'RUNNING', 'OK', 'PARTIAL', 'FAILED', 'ABORTED', 'SKIPPED']) logger = logging.getLogger('config') ########################## ### CONFIG POINT TYPES ### ########################## class RebootPolicy(object): """ Represents the reboot policy for the execution -- at what points the device should be rebooted. This, in turn, is controlled by the policy value that is passed in on construction and would typically be read from the user's settings. Valid policy values are: :never: The device will never be rebooted. :as_needed: Only reboot the device if it becomes unresponsive, or needs to be flashed, etc. :initial: The device will be rebooted when the execution first starts, just before executing the first workload spec. 
:each_spec: The device will be rebooted before running a new workload spec. :each_iteration: The device will be rebooted before each new iteration. :run_completion: The device will be rebooted after the run has been completed. """ valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_job', 'run_completion'] @staticmethod def from_pod(pod): return RebootPolicy(pod) def __init__(self, policy): if isinstance(policy, RebootPolicy): policy = policy.policy policy = policy.strip().lower().replace(' ', '_') if policy not in self.valid_policies: message = 'Invalid reboot policy {}; must be one of {}'.format(policy, ', '.join(self.valid_policies)) raise ConfigError(message) self.policy = policy @property def can_reboot(self): return self.policy != 'never' @property def perform_initial_reboot(self): return self.policy == 'initial' @property def reboot_on_each_job(self): return self.policy == 'each_job' @property def reboot_on_each_spec(self): return self.policy == 'each_spec' @property def reboot_on_run_completion(self): return self.policy == 'run_completion' def __str__(self): return self.policy __repr__ = __str__ def __eq__(self, other): if isinstance(other, RebootPolicy): return self.policy == other.policy else: return self.policy == other def to_pod(self): return self.policy class status_list(list): def append(self, item): list.append(self, str(item).upper()) class LoggingConfig(Podable, dict): _pod_serialization_version = 1 defaults = { 'file_format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s', 'verbose_format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s', 'regular_format': '%(levelname)-8s %(message)s', 'color': True, } @staticmethod def from_pod(pod): pod = LoggingConfig._upgrade_pod(pod) pod_version = pod.pop('_pod_version') instance = LoggingConfig(pod) instance._pod_version = pod_version # pylint: disable=protected-access return instance def __init__(self, config=None): super(LoggingConfig, self).__init__() dict.__init__(self) if isinstance(config, dict): config = {identifier(k.lower()): v for k, v in config.items()} self['regular_format'] = config.pop('regular_format', self.defaults['regular_format']) self['verbose_format'] = config.pop('verbose_format', self.defaults['verbose_format']) self['file_format'] = config.pop('file_format', self.defaults['file_format']) self['color'] = config.pop('colour_enabled', self.defaults['color']) # legacy self['color'] = config.pop('color', self.defaults['color']) if config: message = 'Unexpected logging configuration parameters: {}' raise ValueError(message.format(bad_vals=', '.join(list(config.keys())))) elif config is None: for k, v in self.defaults.items(): self[k] = v else: raise ValueError(config) def to_pod(self): pod = super(LoggingConfig, self).to_pod() pod.update(self) return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) return pod def expanded_path(path): """ Ensure that the provided path has been expanded if applicable """ return os.path.expanduser(str(path)) def get_type_name(kind): typename = str(kind) if '\'' in typename: typename = typename.split('\'')[1] elif typename.startswith('`) rather than in the config file. ''', ), ConfigurationPoint( 'project', kind=str, description=''' A string naming the project for which data is being collected. This may be useful, e.g. when uploading data to a shared database that is populated from multiple projects. 
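For example, results collected by several teams or devices can later be filtered or grouped by this value.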
''', ), ConfigurationPoint( 'project_stage', kind=dict, description=''' A dict or a string that allows adding an additional identifier. This may be useful for long-running projects. ''', ), ] config_points = [ ConfigurationPoint( 'execution_order', kind=str, default='by_iteration', allowed_values=['by_iteration', 'by_section', 'by_workload', 'random'], description=''' Defines the order in which the agenda spec will be executed. At the moment, the following execution orders are supported: ``"by_iteration"`` The first iteration of each workload spec is executed one after the other, so all workloads are executed before proceeding on to the second iteration. E.g. A1 B1 C1 A2 B2 C2. This is the default if no order is explicitly specified. In case of multiple sections, this will spread them out, such that specs from the same section are further apart. E.g. given sections X and Y, global specs A and B, and two iterations, this will run :: X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2 ``"by_section"`` Same as ``"by_iteration"``, however this will group specs from the same section together, so given sections X and Y, global specs A and B, and two iterations, this will run :: X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2 ``"by_workload"`` All iterations of the first spec are executed before moving on to the next spec. E.g.:: X.A1, X.A2, Y.A1, Y.A2, X.B1, X.B2, Y.B1, Y.B2 ``"random"`` Execution order is entirely random. ''', ), ConfigurationPoint( 'reboot_policy', kind=RebootPolicy, default='as_needed', allowed_values=RebootPolicy.valid_policies, description=''' This defines when during execution of a run the Device will be rebooted. The possible values are: ``"as_needed"`` The device will only be rebooted if the need arises (e.g. if it becomes unresponsive). ``"never"`` The device will never be rebooted. ``"initial"`` The device will be rebooted when the execution first starts, just before executing the first workload spec. ``"each_job"`` The device will be rebooted before each new job. ``"each_spec"`` The device will be rebooted before running a new workload spec. .. note:: This acts the same as ``each_job`` when execution order is set to ``by_iteration``. ``"run_completion"`` The device will be rebooted after the run has been completed. '''), ConfigurationPoint( 'device', kind=str, default='generic_android', description=''' This setting defines what specific ``Device`` subclass will be used to interact with the connected device. Obviously, this must match your setup. ''', ), ConfigurationPoint( 'retry_on_status', kind=list_of(Status), default=['FAILED', 'PARTIAL'], allowed_values=Status.levels[Status.RUNNING.value:], description=''' This is a list of statuses on which a job will be considered to have failed and will be automatically retried up to ``max_retries`` times. This defaults to ``["FAILED", "PARTIAL"]`` if not set. Possible values are: ``"OK"`` This iteration has completed and no errors have been detected. ``"PARTIAL"`` One or more instruments have failed (the iteration may still be running). ``"FAILED"`` The workload itself has failed. ``"ABORTED"`` The user interrupted the workload. ''', ), ConfigurationPoint( 'max_retries', kind=int, default=2, description=''' The maximum number of times failed jobs will be retried before giving up. .. note:: This number does not include the original attempt. ''', ), ConfigurationPoint( 'bail_on_init_failure', kind=bool, default=True, description=''' When jobs fail during their main setup and run phases, WA will continue attempting to run the remaining jobs.
However, by default, if they fail during their early initialization phase, the entire run will end without continuing to run jobs. Setting this to ``False`` means that WA will instead skip all the jobs from the job spec that failed, but continue attempting to run others. ''' ), ConfigurationPoint( 'bail_on_job_failure', kind=bool, default=False, description=''' When a job fails during its run phase, WA will attempt to retry the job, then continue with remaining jobs after. Setting this to ``True`` means WA will skip remaining jobs and end the run if a job has retried the maximum number of times, and still fails. ''' ), ConfigurationPoint( 'allow_phone_home', kind=bool, default=True, description=''' Setting this to ``False`` prevents running any workloads that are marked with 'phones_home', meaning they are at risk of exposing information about the device to the outside world. For example, some benchmark applications upload device data to a database owned by the maintainers. This can be used to minimise the risk of accidentally running such workloads when testing confidential devices. '''), ] configuration = {cp.name: cp for cp in config_points + meta_data} @classmethod def from_pod(cls, pod): meta_pod = {} for cfg_point in cls.meta_data: meta_pod[cfg_point.name] = pod.pop(cfg_point.name, None) device_config = pod.pop('device_config', None) augmentations = pod.pop('augmentations', {}) getters = pod.pop('resource_getters', {}) instance = super(RunConfiguration, cls).from_pod(pod) instance.device_config = device_config instance.augmentations = augmentations instance.resource_getters = getters for cfg_point in cls.meta_data: cfg_point.set_value(instance, meta_pod[cfg_point.name]) return instance def __init__(self): super(RunConfiguration, self).__init__() for confpoint in self.meta_data: confpoint.set_value(self, check_mandatory=False) self.device_config = None self.augmentations = {} self.resource_getters = {} def merge_device_config(self, plugin_cache): """ Merges global device config and validates that it is correct for the selected device. """ # pylint: disable=no-member if self.device is None: msg = 'Attempting to merge device config with unspecified device' raise RuntimeError(msg) self.device_config = plugin_cache.get_plugin_config(self.device, generic_name="device_config") def add_augmentation(self, aug): if aug.name in self.augmentations: raise ValueError('Augmentation "{}" already added.'.format(aug.name)) self.augmentations[aug.name] = aug.get_config() def add_resource_getter(self, getter): if getter.name in self.resource_getters: raise ValueError('Resource getter "{}" already added.'.format(getter.name)) self.resource_getters[getter.name] = getter.get_config() def to_pod(self): pod = super(RunConfiguration, self).to_pod() pod['device_config'] = dict(self.device_config or {}) pod['augmentations'] = self.augmentations pod['resource_getters'] = self.resource_getters return pod class JobSpec(Configuration): # pylint: disable=access-member-before-definition,attribute-defined-outside-init name = "Job Spec" config_points = [ ConfigurationPoint('iterations', kind=int, default=1, description=''' How many times to repeat this workload spec '''), ConfigurationPoint('workload_name', kind=str, mandatory=True, aliases=["name"], description=''' The name of the workload to run. 
'''), ConfigurationPoint('workload_parameters', kind=obj_dict, merge=True, aliases=["params", "workload_params", "parameters"], description=''' Parameters to be passed to the workload. '''), ConfigurationPoint('runtime_parameters', kind=obj_dict, merge=True, aliases=["runtime_params"], description=''' Runtime parameters to be set prior to running the workload. '''), ConfigurationPoint('boot_parameters', kind=obj_dict, aliases=["boot_params"], description=''' Parameters to be used when rebooting the target prior to running the workload. '''), ConfigurationPoint('label', kind=str, description=''' Similar to IDs, but labels do not have the uniqueness restriction. If specified, labels will be used by some output processors instead of (or in addition to) the workload name. For example, the csv output processor will put the label in the "workload" column of the CSV file. '''), ConfigurationPoint('augmentations', kind=toggle_set, merge=True, aliases=["instruments", "processors", "instrumentation", "output_processors", "augment", "result_processors"], description=''' The instruments and output processors to enable (or disable using a ~) during this workload spec. This combines the "instrumentation" and "result_processors" from previous versions of WA (the old entries are now aliases for this). '''), ConfigurationPoint('flash', kind=dict, merge=True, description=''' '''), ConfigurationPoint('classifiers', kind=dict, merge=True, description=''' Classifiers allow you to tag metrics from this workload spec to help in post processing them. These are often used to help identify what runtime_parameters were used for results when post processing. '''), ] configuration = {cp.name: cp for cp in config_points} @classmethod def from_pod(cls, pod): job_id = pod.pop('id') instance = super(JobSpec, cls).from_pod(pod) instance.id = job_id return instance @property def section_id(self): if self.id is not None: return self.id.rsplit('-', 1)[0] @property def workload_id(self): if self.id is not None: return self.id.rsplit('-', 1)[-1] def __init__(self): super(JobSpec, self).__init__() if self.classifiers is None: self.classifiers = OrderedDict() self.to_merge = defaultdict(OrderedDict) self._sources = [] self.id = None if self.boot_parameters is None: self.boot_parameters = obj_dict() if self.runtime_parameters is None: self.runtime_parameters = obj_dict() def to_pod(self): pod = super(JobSpec, self).to_pod() pod['id'] = self.id return pod def update_config(self, source, check_mandatory=True): # pylint: disable=arguments-differ self._sources.append(source) values = source.config for k, v in values.items(): if k == "id": continue elif k.endswith('_parameters'): if v: self.to_merge[k][source] = copy(v) else: try: self.set(k, v, check_mandatory=check_mandatory) except ConfigError as e: msg = 'Error in {}:\n\t{}' raise ConfigError(msg.format(source.name, e.message)) def merge_workload_parameters(self, plugin_cache): # merge global generic and specific config workload_params = plugin_cache.get_plugin_config(self.workload_name, generic_name="workload_parameters", is_final=False) cfg_points = plugin_cache.get_plugin_parameters(self.workload_name) for source in self._sources: config = dict(self.to_merge["workload_parameters"].get(source, {})) if not config: continue for name, cfg_point in cfg_points.items(): if name in config: value = config.pop(name) cfg_point.set_value(workload_params, value, check_mandatory=False) if config: msg = 'Unexpected config "{}" for "{}"' raise ConfigError(msg.format(config, self.workload_name))
self.workload_parameters = workload_params def merge_runtime_parameters(self, plugin_cache, target_manager): # Order global runtime parameters runtime_parameters = OrderedDict() try: global_runtime_params = plugin_cache.get_plugin_config("runtime_parameters") except NotFoundError: global_runtime_params = {} for source in plugin_cache.sources: if source in global_runtime_params: runtime_parameters[source] = global_runtime_params[source] # Add runtime parameters from JobSpec for source, values in self.to_merge['runtime_parameters'].items(): runtime_parameters[source] = values # Merge self.runtime_parameters = target_manager.merge_runtime_parameters(runtime_parameters) def finalize(self): self.id = "-".join([str(source.config['id']) for source in self._sources[1:]]) # ignore first id, "global" # ensure *_parameters are always obj_dict's self.boot_parameters = obj_dict(list((self.boot_parameters or {}).items())) self.runtime_parameters = obj_dict(list((self.runtime_parameters or {}).items())) self.workload_parameters = obj_dict(list((self.workload_parameters or {}).items())) if self.label is None: self.label = self.workload_name # This is used to construct the list of Jobs WA will run class JobGenerator(object): name = "Jobs Configuration" @property def enabled_instruments(self): self._read_augmentations = True if self._enabled_instruments is None: self._enabled_instruments = [] for entry in list(self._enabled_augmentations.merge_with(self.disabled_augmentations).values()): entry_cls = self.plugin_cache.get_plugin_class(entry) if entry_cls.kind == 'instrument': self._enabled_instruments.append(entry) return self._enabled_instruments @property def enabled_processors(self): self._read_augmentations = True if self._enabled_processors is None: self._enabled_processors = [] for entry in list(self._enabled_augmentations.merge_with(self.disabled_augmentations).values()): entry_cls = self.plugin_cache.get_plugin_class(entry) if entry_cls.kind == 'output_processor': self._enabled_processors.append(entry) return self._enabled_processors def __init__(self, plugin_cache): self.plugin_cache = plugin_cache self.ids_to_run = [] self.workloads = [] self._enabled_augmentations = toggle_set() self._enabled_instruments = None self._enabled_processors = None self._read_augmentations = False self.disabled_augmentations = set() self.job_spec_template = obj_dict(not_in_dict=['name']) self.job_spec_template.name = "globally specified job spec configuration" self.job_spec_template.id = "global" # Load defaults for cfg_point in JobSpec.configuration.values(): cfg_point.set_value(self.job_spec_template, check_mandatory=False) self.root_node = SectionNode(self.job_spec_template) def set_global_value(self, name, value): JobSpec.configuration[name].set_value(self.job_spec_template, value, check_mandatory=False) if name == "augmentations": self.update_augmentations(value) def add_section(self, section, workloads, group): new_node = self.root_node.add_section(section, group) with log.indentcontext(): for workload in workloads: new_node.add_workload(workload) def add_workload(self, workload): self.root_node.add_workload(workload) def disable_augmentations(self, augmentations): for entry in augmentations: if entry == '~~': continue if entry.startswith('~'): entry = entry[1:] try: self.plugin_cache.get_plugin_class(entry) except NotFoundError: raise ConfigError('Error disabling unknown augmentation: "{}"'.format(entry)) self.disabled_augmentations = self.disabled_augmentations.union(augmentations) def update_augmentations(self, 
value): if self._read_augmentations: msg = 'Cannot update augmentations after they have been accessed' raise RuntimeError(msg) self._enabled_augmentations = self._enabled_augmentations.merge_with(value) def only_run_ids(self, ids): if isinstance(ids, str): ids = [ids] self.ids_to_run = ids def generate_job_specs(self, target_manager): specs = [] for leaf in self.root_node.leaves(): workload_entries = leaf.workload_entries sections = [leaf] for ancestor in leaf.ancestors(): workload_entries = ancestor.workload_entries + workload_entries sections.insert(0, ancestor) for workload_entry in workload_entries: job_spec = create_job_spec(deepcopy(workload_entry), sections, target_manager, self.plugin_cache, self.disabled_augmentations) if self.ids_to_run: for job_id in self.ids_to_run: if job_id in job_spec.id: break else: continue self.update_augmentations(list(job_spec.augmentations.values())) specs.append(job_spec) return specs def create_job_spec(workload_entry, sections, target_manager, plugin_cache, disabled_augmentations): job_spec = JobSpec() # PHASE 2.1: Merge general job spec configuration for section in sections: job_spec.update_config(section, check_mandatory=False) # Add classifiers for any present groups if section.id == 'global' or section.group is None: # Ignore global config and default group continue job_spec.classifiers[section.group] = section.id job_spec.update_config(workload_entry, check_mandatory=False) # PHASE 2.2: Merge global, section and workload entry "workload_parameters" job_spec.merge_workload_parameters(plugin_cache) # TODO: PHASE 2.3: Validate device runtime/boot parameters job_spec.merge_runtime_parameters(plugin_cache, target_manager) target_manager.validate_runtime_parameters(job_spec.runtime_parameters) # PHASE 2.4: Disable globally disabled augmentations job_spec.set("augmentations", disabled_augmentations) job_spec.finalize() return job_spec def get_config_point_map(params): pmap = {} for p in params: pmap[p.name] = p for alias in p.aliases: pmap[alias] = p return pmap settings = MetaConfiguration(os.environ) ================================================ FILE: wa/framework/configuration/default.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
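#
# A minimal usage sketch for this module (the output path here is
# hypothetical; generate_default_config is defined below). It writes a YAML
# config file containing the default value of every MetaConfiguration and
# RunConfiguration point, each preceded by a comment derived from the
# point's description, followed by the default augmentations list:
#
#     from wa.framework.configuration.default import generate_default_config
#     generate_default_config('/tmp/wa-default-config.yaml')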
# from wa.framework.configuration.core import MetaConfiguration, RunConfiguration from wa.framework.configuration.plugin_cache import PluginCache from wa.utils.serializer import yaml from wa.utils.doc import strip_inlined_text DEFAULT_AUGMENTATIONS = [ 'execution_time', 'interrupts', 'cpufreq', 'status', 'csv', ] def _format_yaml_comment(param, short_description=False): comment = param.description comment = strip_inlined_text(comment) if short_description: comment = comment.split('\n\n')[0] comment = comment.replace('\n', '\n# ') comment = "# {}\n".format(comment) return comment def _format_augmentations(output): plugin_cache = PluginCache() output.write("augmentations:\n") for plugin in DEFAULT_AUGMENTATIONS: plugin_cls = plugin_cache.loader.get_plugin_class(plugin) output.writelines(_format_yaml_comment(plugin_cls, short_description=True)) output.write(" - {}\n".format(plugin)) output.write("\n") def generate_default_config(path): with open(path, 'w') as output: for param in MetaConfiguration.config_points + RunConfiguration.config_points: entry = {param.name: param.default} write_param_yaml(entry, param, output) _format_augmentations(output) def write_param_yaml(entry, param, output): comment = _format_yaml_comment(param) output.writelines(comment) yaml.dump(entry, output, default_flow_style=False) output.write("\n") ================================================ FILE: wa/framework/configuration/execution.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import random from itertools import groupby, chain from future.moves.itertools import zip_longest from devlib.utils.types import identifier from wa.framework.configuration.core import (MetaConfiguration, RunConfiguration, JobGenerator, settings) from wa.framework.configuration.parsers import ConfigParser from wa.framework.configuration.plugin_cache import PluginCache from wa.framework.exception import NotFoundError, ConfigError from wa.framework.job import Job from wa.utils import log from wa.utils.serializer import Podable class CombinedConfig(Podable): _pod_serialization_version = 1 @staticmethod def from_pod(pod): instance = super(CombinedConfig, CombinedConfig).from_pod(pod) instance.settings = MetaConfiguration.from_pod(pod.get('settings', {})) instance.run_config = RunConfiguration.from_pod(pod.get('run_config', {})) return instance def __init__(self, settings=None, run_config=None): # pylint: disable=redefined-outer-name super(CombinedConfig, self).__init__() self.settings = settings self.run_config = run_config def to_pod(self): pod = super(CombinedConfig, self).to_pod() pod['settings'] = self.settings.to_pod() pod['run_config'] = self.run_config.to_pod() return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) return pod class ConfigManager(object): """ Represents run-time state of WA. Mostly used as a container for loaded configuration and discovered plugins. 
This exists outside of any command or run and is associated with the running instance of WA itself. """ @property def enabled_instruments(self): return self.jobs_config.enabled_instruments @property def enabled_processors(self): return self.jobs_config.enabled_processors @property def job_specs(self): if not self._jobs_generated: msg = 'Attempting to access job specs before '\ 'jobs have been generated' raise RuntimeError(msg) return [j.spec for j in self._jobs] @property def jobs(self): if not self._jobs_generated: msg = 'Attempting to access jobs before '\ 'they have been generated' raise RuntimeError(msg) return self._jobs def __init__(self, settings=settings): # pylint: disable=redefined-outer-name self.settings = settings self.run_config = RunConfiguration() self.plugin_cache = PluginCache() self.jobs_config = JobGenerator(self.plugin_cache) self.loaded_config_sources = [] self._config_parser = ConfigParser() self._jobs = [] self._jobs_generated = False self.agenda = None def load_config_file(self, filepath): includes = self._config_parser.load_from_path(self, filepath) self.loaded_config_sources.append(filepath) self.loaded_config_sources.extend(includes) def load_config(self, values, source): self._config_parser.load(self, values, source) self.loaded_config_sources.append(source) def get_plugin(self, name=None, kind=None, *args, **kwargs): return self.plugin_cache.get_plugin(identifier(name), kind, *args, **kwargs) def get_instruments(self, target): instruments = [] for name in self.enabled_instruments: try: instruments.append(self.get_plugin(name, kind='instrument', target=target)) except NotFoundError: msg = 'Instrument "{}" not found' raise NotFoundError(msg.format(name)) return instruments def get_processors(self): processors = [] for name in self.enabled_processors: try: proc = self.plugin_cache.get_plugin(name, kind='output_processor') except NotFoundError: msg = 'Output Processor "{}" not found' raise NotFoundError(msg.format(name)) processors.append(proc) return processors def get_config(self): return CombinedConfig(self.settings, self.run_config) def finalize(self): if not self.agenda: msg = 'Attempting to finalize config before agenda has been set' raise RuntimeError(msg) self.run_config.merge_device_config(self.plugin_cache) return self.get_config() def generate_jobs(self, context): job_specs = self.jobs_config.generate_job_specs(context.tm) if not job_specs: msg = 'No jobs available for running.' raise ConfigError(msg) exec_order = self.run_config.execution_order log.indent() for spec, i in permute_iterations(job_specs, exec_order): job = Job(spec, i, context) job.load(context.tm.target) self._jobs.append(job) context.run_state.add_job(job) log.dedent() self._jobs_generated = True def permute_by_workload(specs): """ This is the "classic" implementation that executes all iterations of a workload spec before proceeding on to the next spec. """ for spec in specs: for i in range(1, spec.iterations + 1): yield (spec, i) def permute_by_iteration(specs): """ Runs the first iteration for all benchmarks first, before proceeding to the next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, C1, C2... If multiple sections were specified in the agenda, this will run all sections for the first global spec first, followed by all sections for the second spec, etc. e.g.
given sections X and Y, and global specs A and B, with 2 iterations, this will run X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2 """ groups = [list(g) for _, g in groupby(specs, lambda s: s.workload_id)] all_tuples = [] for spec in chain(*groups): all_tuples.append([(spec, i + 1) for i in range(spec.iterations)]) for t in chain(*list(map(list, zip_longest(*all_tuples)))): if t is not None: yield t def permute_by_section(specs): """ Runs the first iteration for all benchmarks first, before proceeding to the next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, C1, C2... If multiple sections were specified in the agenda, this will run all specs for the first section followed by all specs for the second section, etc. e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2 """ groups = [list(g) for _, g in groupby(specs, lambda s: s.section_id)] all_tuples = [] for spec in chain(*groups): all_tuples.append([(spec, i + 1) for i in range(spec.iterations)]) for t in chain(*list(map(list, zip_longest(*all_tuples)))): if t is not None: yield t def permute_randomly(specs): """ This will generate a random permutation of specs/iteration tuples. """ result = [] for spec in specs: for i in range(1, spec.iterations + 1): result.append((spec, i)) random.shuffle(result) for t in result: yield t permute_map = { 'by_iteration': permute_by_iteration, 'by_workload': permute_by_workload, 'by_section': permute_by_section, 'random': permute_randomly, } def permute_iterations(specs, exec_order): if exec_order not in permute_map: msg = 'Unknown execution order "{}"; must be in: {}' raise ValueError(msg.format(exec_order, list(permute_map.keys()))) return permute_map[exec_order](specs) ================================================ FILE: wa/framework/configuration/parsers.py ================================================ # Copyright 2015-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
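#
# A rough sketch of what ConfigParser.load (defined below) does with a raw
# config dict -- the plugin name and values here are illustrative only:
#
#     raw = {
#         'device': 'generic_android',           # matched RunConfiguration point
#         'augmentations': ['~execution_time'],  # matched global JobSpec point
#         'trace-cmd': {'buffer_size': 8000},    # leftover: cached as plugin config
#     }
#
# Recognised ConfigurationPoints are popped off the dict and set on the
# appropriate configuration object; whatever remains is assumed to be
# plug-in (or global alias) configuration and is stored in the PluginCache
# against its source.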
# # pylint: disable=no-self-use import os import logging from functools import reduce # pylint: disable=redefined-builtin from devlib.utils.types import identifier from wa.framework.configuration.core import JobSpec from wa.framework.exception import ConfigError from wa.utils import log from wa.utils.serializer import json, read_pod, SerializerSyntaxError from wa.utils.types import toggle_set, counter from wa.utils.misc import merge_config_values, isiterable logger = logging.getLogger('config') class ConfigParser(object): def load_from_path(self, state, filepath): raw, includes = _load_file(filepath, "Config") self.load(state, raw, filepath) return includes def load(self, state, raw, source, wrap_exceptions=True): # pylint: disable=too-many-branches logger.debug('Parsing config from "{}"'.format(source)) log.indent() try: state.plugin_cache.add_source(source) if 'run_name' in raw: msg = '"run_name" can only be specified in the config '\ 'section of an agenda' raise ConfigError(msg) if 'id' in raw: raise ConfigError('"id" cannot be set globally') merge_augmentations(raw) # Get WA core configuration for cfg_point in state.settings.configuration.values(): value = pop_aliased_param(cfg_point, raw) if value is not None: logger.debug('Setting meta "{}" to "{}"'.format(cfg_point.name, value)) state.settings.set(cfg_point.name, value) # Get run specific configuration for cfg_point in state.run_config.configuration.values(): value = pop_aliased_param(cfg_point, raw) if value is not None: logger.debug('Setting run "{}" to "{}"'.format(cfg_point.name, value)) state.run_config.set(cfg_point.name, value) # Get global job spec configuration for cfg_point in JobSpec.configuration.values(): value = pop_aliased_param(cfg_point, raw) if value is not None: logger.debug('Setting global "{}" to "{}"'.format(cfg_point.name, value)) state.jobs_config.set_global_value(cfg_point.name, value) for name, values in raw.items(): # Assume that all leftover config is for a plug-in or a global # alias; it is up to PluginCache to assert this assumption logger.debug('Caching "{}" with "{}"'.format(identifier(name), values)) state.plugin_cache.add_configs(identifier(name), values, source) except ConfigError as e: if wrap_exceptions: raise ConfigError('Error in "{}":\n{}'.format(source, str(e))) else: raise e finally: log.dedent() class AgendaParser(object): def load_from_path(self, state, filepath): raw, includes = _load_file(filepath, 'Agenda') self.load(state, raw, filepath) return includes def load(self, state, raw, source): logger.debug('Parsing agenda from "{}"'.format(source)) log.indent() try: if not isinstance(raw, dict): raise ConfigError('Invalid agenda, top level entry must be a dict') self._populate_and_validate_config(state, raw, source) sections = self._pop_sections(raw) global_workloads = self._pop_workloads(raw) if not global_workloads: msg = 'No jobs available. Please ensure you have specified at '\ 'least one workload to run.'
raise ConfigError(msg) if raw: msg = 'Invalid top level agenda entry(ies): "{}"' raise ConfigError(msg.format('", "'.join(list(raw.keys())))) sect_ids, wkl_ids = self._collect_ids(sections, global_workloads) self._process_global_workloads(state, global_workloads, wkl_ids) self._process_sections(state, sections, sect_ids, wkl_ids) state.agenda = source except (ConfigError, SerializerSyntaxError) as e: raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e))) finally: log.dedent() def _populate_and_validate_config(self, state, raw, source): for name in ['config', 'global']: entry = raw.pop(name, None) if entry is None: continue if not isinstance(entry, dict): msg = 'Invalid entry "{}" - must be a dict' raise ConfigError(msg.format(name)) if 'run_name' in entry: value = entry.pop('run_name') logger.debug('Setting run name to "{}"'.format(value)) state.run_config.set('run_name', value) state.load_config(entry, '{}/{}'.format(source, name)) def _pop_sections(self, raw): sections = raw.pop("sections", []) if not isinstance(sections, list): raise ConfigError('Invalid entry "sections" - must be a list') for section in sections: if not hasattr(section, 'items'): raise ConfigError('Invalid section "{}" - must be a dict'.format(section)) return sections def _pop_workloads(self, raw): workloads = raw.pop("workloads", []) if not isinstance(workloads, list): raise ConfigError('Invalid entry "workloads" - must be a list') return workloads def _collect_ids(self, sections, global_workloads): seen_section_ids = set() seen_workload_ids = set() for workload in global_workloads: workload = _get_workload_entry(workload) _collect_valid_id(workload.get("id"), seen_workload_ids, "workload") for section in sections: _collect_valid_id(section.get("id"), seen_section_ids, "section") for workload in section["workloads"] if "workloads" in section else []: workload = _get_workload_entry(workload) _collect_valid_id(workload.get("id"), seen_workload_ids, "workload") return seen_section_ids, seen_workload_ids def _process_global_workloads(self, state, global_workloads, seen_wkl_ids): for workload_entry in global_workloads: workload = _process_workload_entry(workload_entry, seen_wkl_ids, state.jobs_config) state.jobs_config.add_workload(workload) def _process_sections(self, state, sections, seen_sect_ids, seen_wkl_ids): for section in sections: workloads = [] for workload_entry in section.pop("workloads", []): workload = _process_workload_entry(workload_entry, seen_wkl_ids, state.jobs_config) workloads.append(workload) if 'params' in section: if 'runtime_params' in section: msg = 'both "params" and "runtime_params" specified in a '\ 'section: "{}"' raise ConfigError(msg.format(json.dumps(section, indent=None))) section['runtime_params'] = section.pop('params') group = section.pop('group', None) section = _construct_valid_entry(section, seen_sect_ids, "s", state.jobs_config) state.jobs_config.add_section(section, workloads, group) ######################## ### Helper functions ### ######################## def pop_aliased_param(cfg_point, d, default=None): """ Given a ConfigurationPoint and a dict, this function will search the dict for the ConfigurationPoint's name/aliases. If more than one is found, it will raise a ConfigError. If one (and only one) is found, then it will return the value for the ConfigurationPoint. If neither the name nor any of the aliases are present in the dict, it will return the ``default`` parameter of this function.
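For example (hypothetical values), given the ``workload_parameters``
config point, which has ``params`` among its aliases ::

    pop_aliased_param(cfg_point, {'params': {'frames': 300}})
    # returns {'frames': 300}, and 'params' is popped from the dict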
""" aliases = [cfg_point.name] + cfg_point.aliases alias_map = [a for a in aliases if a in d] if len(alias_map) > 1: raise ConfigError('Duplicate entry: {}'.format(aliases)) elif alias_map: return d.pop(alias_map[0]) else: return default def _load_file(filepath, error_name): if not os.path.isfile(filepath): raise ValueError("{} does not exist".format(filepath)) try: raw = read_pod(filepath) includes = _process_includes(raw, filepath, error_name) except SerializerSyntaxError as e: raise ConfigError('Error parsing {} {}: {}'.format(error_name, filepath, e)) if not isinstance(raw, dict): message = '{} does not contain a valid {} structure; top level must be a dict.' raise ConfigError(message.format(filepath, error_name)) return raw, includes def _config_values_from_includes(filepath, include_path, error_name): source_dir = os.path.dirname(filepath) included_files = [] if isinstance(include_path, str): include_path = os.path.expanduser(os.path.join(source_dir, include_path)) replace_value, includes = _load_file(include_path, error_name) included_files.append(include_path) included_files.extend(includes) elif isinstance(include_path, list): replace_value = {} for path in include_path: include_path = os.path.expanduser(os.path.join(source_dir, path)) sub_replace_value, includes = _load_file(include_path, error_name) for key, val in sub_replace_value.items(): replace_value[key] = merge_config_values(val, replace_value.get(key, None)) included_files.append(include_path) included_files.extend(includes) else: message = "{} does not contain a valid {} structure; value for 'include#' must be a string or a list" raise ConfigError(message.format(filepath, error_name)) return replace_value, included_files def _process_includes(raw, filepath, error_name): if not raw: return [] included_files = [] replace_value = None if hasattr(raw, 'items'): for key, value in raw.items(): if key == 'include#': replace_value, includes = _config_values_from_includes(filepath, value, error_name) included_files.extend(includes) elif hasattr(value, 'items') or isiterable(value): includes = _process_includes(value, filepath, error_name) included_files.extend(includes) elif isiterable(raw): for element in raw: if hasattr(element, 'items') or isiterable(element): includes = _process_includes(element, filepath, error_name) included_files.extend(includes) if replace_value is not None: del raw['include#'] for key, value in replace_value.items(): raw[key] = merge_config_values(value, raw.get(key, None)) return included_files def merge_augmentations(raw): """ Since, from configuration perspective, output processors and instruments are handled identically, the configuration entries are now interchangeable. E.g. it is now valid to specify a output processor in an instruments list. This is to make things easier for the users, as, from their perspective, the distinction is somewhat arbitrary. For backwards compatibility, both entries are still valid, and this function merges them together into a single "augmentations" set, ensuring that there are no conflicts between the entries. 
""" cfg_point = JobSpec.configuration['augmentations'] names = [cfg_point.name, ] + cfg_point.aliases entries = [] for n in names: if n not in raw: continue value = raw.pop(n) try: entries.append(toggle_set(value)) except TypeError as exc: msg = 'Invalid value "{}" for "{}": {}' raise ConfigError(msg.format(value, n, exc)) # Make sure none of the specified aliases conflict with each other to_check = list(entries) while len(to_check) > 1: check_entry = to_check.pop() for e in to_check: conflicts = check_entry.conflicts_with(e) if conflicts: msg = '"{}" and "{}" have conflicting entries: {}' conflict_string = ', '.join('"{}"'.format(c.strip("~")) for c in conflicts) raise ConfigError(msg.format(check_entry, e, conflict_string)) if entries: raw['augmentations'] = reduce(lambda x, y: x.union(y), entries) def _pop_aliased(d, names, entry_id): name_count = sum(1 for n in names if n in d) if name_count > 1: names_list = ', '.join(names) msg = 'Invalid workload entry "{}": at most one of ({}}) must be specified.' raise ConfigError(msg.format(entry_id, names_list)) for name in names: if name in d: return d.pop(name) return None def _construct_valid_entry(raw, seen_ids, prefix, jobs_config): workload_entry = {} # Generate an automatic ID if the entry doesn't already have one if 'id' not in raw: while True: new_id = '{}{}'.format(prefix, counter(name=prefix)) if new_id not in seen_ids: break workload_entry['id'] = new_id seen_ids.add(new_id) else: workload_entry['id'] = raw.pop('id') # Process instruments merge_augmentations(raw) # Validate all workload_entry for name, cfg_point in JobSpec.configuration.items(): value = pop_aliased_param(cfg_point, raw) if value is not None: value = cfg_point.kind(value) cfg_point.validate_value(name, value) workload_entry[name] = value if "augmentations" in workload_entry: if '~~' in workload_entry['augmentations']: msg = '"~~" can only be specfied in top-level config, and not for individual workloads/sections' raise ConfigError(msg) jobs_config.update_augmentations(workload_entry['augmentations']) # error if there are unknown workload_entry if raw: msg = 'Invalid entry(ies) in "{}": "{}"' raise ConfigError(msg.format(workload_entry['id'], ', '.join(list(raw.keys())))) return workload_entry def _collect_valid_id(entry_id, seen_ids, entry_type): if entry_id is None: return entry_id = str(entry_id) if entry_id in seen_ids: raise ConfigError('Duplicate {} ID "{}".'.format(entry_type, entry_id)) # "-" is reserved for joining section and workload IDs if "-" in entry_id: msg = 'Invalid {} ID "{}"; IDs cannot contain a "-"' raise ConfigError(msg.format(entry_type, entry_id)) if entry_id == "global": msg = 'Invalid {} ID "global"; is a reserved ID' raise ConfigError(msg.format(entry_type)) seen_ids.add(entry_id) def _get_workload_entry(workload): if isinstance(workload, str): workload = {'name': workload} elif not isinstance(workload, dict): raise ConfigError('Invalid workload entry: "{}"') return workload def _process_workload_entry(workload, seen_workload_ids, jobs_config): workload = _get_workload_entry(workload) workload = _construct_valid_entry(workload, seen_workload_ids, "wk", jobs_config) if "workload_name" not in workload: raise ConfigError('No workload name specified in entry {}'.format(workload['id'])) return workload ================================================ FILE: wa/framework/configuration/plugin_cache.py ================================================ # Copyright 2016-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); 
# you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from copy import copy from collections import defaultdict from itertools import chain from devlib.utils.misc import memoized from wa.framework import pluginloader from wa.framework.configuration.core import get_config_point_map from wa.framework.exception import ConfigError, NotFoundError from wa.framework.target.descriptor import list_target_descriptions from wa.utils.types import obj_dict, caseless_string GENERIC_CONFIGS = ["device_config", "workload_parameters", "boot_parameters", "runtime_parameters"] class PluginCache(object): """ The plugin cache is used to store configuration that cannot be processed at this stage, whether that's because it is unknown if it's needed (in the case of disabled plug-ins) or it is not known what it belongs to (in the case of "device_config" etc.). It also maintains where configuration came from, and the priority order of said sources. """ def __init__(self, loader=pluginloader): self.loader = loader self.sources = [] self.plugin_configs = defaultdict(lambda: defaultdict(dict)) self.global_alias_values = defaultdict(dict) self.targets = {td.name: td for td in list_target_descriptions()} # Generate a mapping of which plugin each global alias belongs to self._global_alias_map = defaultdict(dict) self._list_of_global_aliases = set() for plugin in self.loader.list_plugins(): for param in plugin.parameters: if param.global_alias: self._global_alias_map[plugin.name][param.global_alias] = param self._list_of_global_aliases.add(param.global_alias) def add_source(self, source): if source in self.sources: msg = "Source '{}' has already been added." raise Exception(msg.format(source)) self.sources.append(source) def add_global_alias(self, alias, value, source): if source not in self.sources: msg = "Source '{}' has not been added to the plugin cache." raise RuntimeError(msg.format(source)) if not self.is_global_alias(alias): msg = "'{}' is not a valid global alias" raise RuntimeError(msg.format(alias)) self.global_alias_values[alias][source] = value def add_configs(self, plugin_name, values, source): if self.is_global_alias(plugin_name): self.add_global_alias(plugin_name, values, source) return if source not in self.sources: msg = "Source '{}' has not been added to the plugin cache."
raise RuntimeError(msg.format(source)) if caseless_string(plugin_name) in ['global', 'config']: msg = '"{}" entry specified inside config/global section; If this is ' \ 'defined in a config file, move the entry content into the top level' raise ConfigError(msg.format((plugin_name))) if (not self.loader.has_plugin(plugin_name) and plugin_name not in self.targets and plugin_name not in GENERIC_CONFIGS): msg = 'configuration provided for unknown plugin "{}"' raise ConfigError(msg.format(plugin_name)) if not hasattr(values, 'items'): msg = 'Plugin configuration for "{}" not a dictionary ({} is {})' raise ConfigError(msg.format(plugin_name, repr(values), type(values))) for name, value in values.items(): if (plugin_name not in GENERIC_CONFIGS and name not in self.get_plugin_parameters(plugin_name)): msg = "'{}' is not a valid parameter for '{}'" raise ConfigError(msg.format(name, plugin_name)) self.plugin_configs[plugin_name][source][name] = value def is_global_alias(self, name): return name in self._list_of_global_aliases def list_plugins(self, kind=None): return self.loader.list_plugins(kind) def get_plugin_config(self, plugin_name, generic_name=None, is_final=True): config = obj_dict(not_in_dict=['name']) config.name = plugin_name if plugin_name not in GENERIC_CONFIGS: self._set_plugin_defaults(plugin_name, config) self._set_from_global_aliases(plugin_name, config) if generic_name is None: # Perform a simple merge with the order of sources representing # priority plugin_config = self.plugin_configs[plugin_name] cfg_points = self.get_plugin_parameters(plugin_name) for source in self.sources: if source not in plugin_config: continue for name, value in plugin_config[source].items(): cfg_points[name].set_value(config, value=value) else: # A more complicated merge that involves priority of sources and # specificity self._merge_using_priority_specificity(plugin_name, generic_name, config, is_final) return config def get_plugin(self, name, kind=None, *args, **kwargs): config = self.get_plugin_config(name) kwargs = dict(list(config.items()) + list(kwargs.items())) return self.loader.get_plugin(name, kind=kind, *args, **kwargs) def get_plugin_class(self, name, kind=None): return self.loader.get_plugin_class(name, kind) @memoized def get_plugin_parameters(self, name): if name in self.targets: return self._get_target_params(name) params = self.loader.get_plugin_class(name).parameters return get_config_point_map(params) def resolve_alias(self, name): return self.loader.resolve_alias(name) def _set_plugin_defaults(self, plugin_name, config): cfg_points = self.get_plugin_parameters(plugin_name) for cfg_point in cfg_points.values(): cfg_point.set_value(config, check_mandatory=False) try: _, alias_params = self.resolve_alias(plugin_name) for name, value in alias_params.items(): cfg_points[name].set_value(config, value) except NotFoundError: pass def _set_from_global_aliases(self, plugin_name, config): for alias, param in self._global_alias_map[plugin_name].items(): if alias in self.global_alias_values: for source in self.sources: if source not in self.global_alias_values[alias]: continue val = self.global_alias_values[alias][source] param.set_value(config, value=val) def _get_target_params(self, name): td = self.targets[name] return get_config_point_map(chain(td.target_params, td.platform_params, td.conn_params, td.assistant_params)) # pylint: disable=too-many-nested-blocks, too-many-branches def _merge_using_priority_specificity(self, specific_name, generic_name, merged_config, is_final=True): """ WA 
configuration can come from various sources of increasing priority, as well as being specified in a generic and specific manner (e.g. ``device_config`` and ``nexus10`` respectively). WA has two rules for the priority of configuration: - Configuration from higher priority sources overrides configuration from lower priority sources. - More specific configuration overrides less specific configuration. There is a situation where these two rules come into conflict: when a generic configuration is given in a config source of high priority and a specific configuration is given in a config source of lower priority. In this situation it is not possible to know the end user's intention, so WA will raise an error. :param specific_name: The name of the specific configuration used, e.g. ``nexus10`` :param generic_name: The name of the generic configuration, e.g. ``device_config`` :param merged_config: A dict of ``ConfigurationPoint``s to be used when merging configuration. Keys are config point names, values are config points. :param is_final: if ``True`` (the default) make sure that mandatory parameters are set. :rtype: A fully merged and validated configuration in the form of an ``obj_dict``. """ ms = MergeState() ms.generic_name = generic_name ms.specific_name = specific_name ms.generic_config = copy(self.plugin_configs[generic_name]) ms.specific_config = copy(self.plugin_configs[specific_name]) ms.cfg_points = self.get_plugin_parameters(specific_name) sources = self.sources # set_value uses the 'name' attribute of the passed object in its error # messages; to ensure these messages make sense, the name will have to be # changed several times during this function. merged_config.name = specific_name for source in sources: try: update_config_from_source(merged_config, source, ms) except ConfigError as e: raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e))) # Validate final configuration merged_config.name = specific_name for cfg_point in ms.cfg_points.values(): cfg_point.validate(merged_config, check_mandatory=is_final) def __getattr__(self, name): """ This resolves methods for specific plugin types based on corresponding generic plugin methods.
So it's possible to say things like :: loader.get_device('foo') instead of :: loader.get_plugin('foo', kind='device') """ error_msg = 'No plugins of type "{}" discovered' if name.startswith('get_'): name = name.replace('get_', '', 1) if name in self.loader.kind_map: def __wrapper(pname, *args, **kwargs): return self.get_plugin(pname, name, *args, **kwargs) return __wrapper raise NotFoundError(error_msg.format(name)) if name.startswith('list_'): name = name.replace('list_', '', 1).rstrip('s') if name in self.loader.kind_map: def __wrapper(*args, **kwargs): # pylint: disable=E0102 return self.list_plugins(name, *args, **kwargs) return __wrapper raise NotFoundError(error_msg.format(name)) if name.startswith('has_'): name = name.replace('has_', '', 1) if name in self.loader.kind_map: def __wrapper(pname, *args, **kwargs): # pylint: disable=E0102 return self.loader.has_plugin(pname, name, *args, **kwargs) return __wrapper raise NotFoundError(error_msg.format(name)) raise AttributeError(name) class MergeState(object): def __init__(self): self.generic_name = None self.specific_name = None self.generic_config = None self.specific_config = None self.cfg_points = None self.seen_specific_config = defaultdict(list) def update_config_from_source(final_config, source, state): if source in state.generic_config: final_config.name = state.generic_name for name, cfg_point in state.cfg_points.items(): if name in state.generic_config[source]: if name in state.seen_specific_config: msg = ('"{generic_name}" configuration "{config_name}" has ' 'already been specified more specifically for ' '{specific_name} in:\n\t\t{sources}') seen_sources = state.seen_specific_config[name] msg = msg.format(generic_name=state.generic_name, config_name=name, specific_name=state.specific_name, sources=", ".join(seen_sources)) raise ConfigError(msg) value = state.generic_config[source].pop(name) cfg_point.set_value(final_config, value, check_mandatory=False) if state.generic_config[source]: msg = 'Unexpected values for {}: {}' raise ConfigError(msg.format(state.generic_name, state.generic_config[source])) if source in state.specific_config: final_config.name = state.specific_name for name, cfg_point in state.cfg_points.items(): if name in state.specific_config[source]: state.seen_specific_config[name].append(str(source)) value = state.specific_config[source].pop(name) cfg_point.set_value(final_config, value, check_mandatory=False) if state.specific_config[source]: msg = 'Unexpected values for {}: {}' raise ConfigError(msg.format(state.specific_name, state.specific_config[source])) ================================================ FILE: wa/framework/configuration/tree.py ================================================ # Copyright 2016-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging from wa.utils import log logger = logging.getLogger('config') class JobSpecSource(object): kind = "" def __init__(self, config, parent=None): self.config = config self.parent = parent self._log_self() @property def id(self): return self.config['id'] @property def name(self): raise NotImplementedError() def _log_self(self): logger.debug('Creating {} node'.format(self.kind)) with log.indentcontext(): for key, value in self.config.items(): logger.debug('"{}" to "{}"'.format(key, value)) class WorkloadEntry(JobSpecSource): kind = "workload" @property def name(self): if self.parent.id == "global": return 'workload "{}"'.format(self.id) else: return 'workload "{}" from section "{}"'.format(self.id, self.parent.id) class SectionNode(JobSpecSource): kind = "section" @property def name(self): if self.id == "global": return "globally specified configuration" else: return 'section "{}"'.format(self.id) @property def is_leaf(self): return not bool(self.children) def __init__(self, config, parent=None, group=None): super(SectionNode, self).__init__(config, parent=parent) self.workload_entries = [] self.children = [] self.group = group def add_section(self, section, group=None): # Each level is the same group, only need to check first if not self.children or group == self.children[0].group: new_node = SectionNode(section, parent=self, group=group) self.children.append(new_node) else: for child in self.children: new_node = child.add_section(section, group) return new_node def add_workload(self, workload_config): self.workload_entries.append(WorkloadEntry(workload_config, self)) def descendants(self): for child in self.children: for n in child.descendants(): yield n yield child def ancestors(self): if self.parent is not None: yield self.parent for ancestor in self.parent.ancestors(): yield ancestor def leaves(self): if self.is_leaf: yield self else: for n in self.descendants(): if n.is_leaf: yield n ================================================ FILE: wa/framework/entrypoint.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys import argparse import locale import logging import os import warnings import devlib try: from devlib.utils.version import version as installed_devlib_version except ImportError: installed_devlib_version = None from wa.framework import pluginloader from wa.framework.command import init_argument_parser from wa.framework.configuration import settings from wa.framework.configuration.execution import ConfigManager from wa.framework.host import init_user_directory, init_config from wa.framework.exception import ConfigError, HostError from wa.framework.version import (get_wa_version_with_commit, format_version, required_devlib_version) from wa.utils import log from wa.utils.doc import format_body warnings.filterwarnings(action='ignore', category=UserWarning, module='zope') # Disable this to avoid false positive from dynamically-created attributes. 
# pylint: disable=no-member logger = logging.getLogger('command_line') def load_commands(subparsers): commands = {} for command in pluginloader.list_commands(): commands[command.name] = pluginloader.get_command(command.name, subparsers=subparsers) return commands # ArgumentParser.parse_known_args() does not correctly deal with concatenated # single character options. See https://bugs.python.org/issue16142 for the # description of the issue (with a fix attached since 2013!). To get around # this problem, this will pre-process sys.argv to detect such joined options # and split them. def split_joined_options(argv): output = [] for part in argv: if len(part) > 1 and part[0] == '-' and part[1] != '-': for c in part[1:]: output.append('-' + c) else: output.append(part) return output # Instead of presenting an obscure error due to a version mismatch, explicitly warn the user. def check_devlib_version(): if not installed_devlib_version or installed_devlib_version[:-1] <= required_devlib_version[:-1]: # Check the 'dev' field separately to account for comparing with release versions. if installed_devlib_version.dev and installed_devlib_version.dev < required_devlib_version.dev: msg = 'WA requires Devlib version >={}. Please update the currently installed version {}' raise HostError(msg.format(format_version(required_devlib_version), devlib.__version__)) # If the default encoding is not UTF-8, warn the user, as this may cause compatibility issues # when parsing files. def check_system_encoding(): system_encoding = locale.getpreferredencoding() msg = 'System Encoding: {}'.format(system_encoding) if 'UTF-8' not in system_encoding: logger.warning(msg) logger.warning('To prevent encoding issues please use a locale setting which supports UTF-8') else: logger.debug(msg) def main(): if not os.path.exists(settings.user_directory): init_user_directory() if not os.path.exists(os.path.join(settings.user_directory, 'config.yaml')): init_config() try: description = ("Execute automated workloads on a remote device and process " "the resulting output.\n\nUse \"wa -h\" to see " "help for individual subcommands.") parser = argparse.ArgumentParser(description=format_body(description, 80), prog='wa', formatter_class=argparse.RawDescriptionHelpFormatter, ) init_argument_parser(parser) # load_commands will trigger plugin enumeration, and we want logging # to be enabled for that, which requires the verbosity setting; however # full argument parsing cannot be completed until the commands are loaded, so # parse just the base args for now so we can get verbosity. argv = split_joined_options(sys.argv[1:]) # 'parse_known_args' automatically displays the default help and exits # if '-h' or '--help' is detected; we want our custom help messages, so # ensure these are never passed as parameters.
filtered_argv = list(argv) if '-h' in filtered_argv: filtered_argv.remove('-h') elif '--help' in filtered_argv: filtered_argv.remove('--help') args, _ = parser.parse_known_args(filtered_argv) settings.set("verbosity", args.verbose) log.init(settings.verbosity) logger.debug('Version: {}'.format(get_wa_version_with_commit())) logger.debug('devlib version: {}'.format(devlib.__full_version__)) logger.debug('Command Line: {}'.format(' '.join(sys.argv))) check_devlib_version() check_system_encoding() # each command will add its own subparser subparsers = parser.add_subparsers(dest='command') subparsers.required = True commands = load_commands(subparsers) args = parser.parse_args(argv) config = ConfigManager() config.load_config_file(settings.user_config_file) for config_file in args.config: if not os.path.exists(config_file): raise ConfigError("Config file {} not found".format(config_file)) config.load_config_file(config_file) command = commands[args.command] sys.exit(command.execute(config, args)) except KeyboardInterrupt as e: log.log_error(e, logger) sys.exit(3) except Exception as e: # pylint: disable=broad-except log.log_error(e, logger) sys.exit(2) ================================================ FILE: wa/framework/exception.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=unused-import from devlib.exception import (DevlibError, HostError, TimeoutError, # pylint: disable=redefined-builtin TargetError, TargetNotRespondingError) from wa.utils.misc import get_traceback class WAError(Exception): """Base class for all Workload Automation exceptions.""" @property def message(self): if self.args: return self.args[0] return '' class NotFoundError(WAError): """Raised when the specified item is not found.""" class ValidationError(WAError): """Raised on failure to validate an extension.""" class ExecutionError(WAError): """Error encountered by the execution framework.""" class WorkloadError(WAError): """General Workload error.""" class JobError(WAError): """Job execution error.""" class InstrumentError(WAError): """General Instrument error.""" class OutputProcessorError(WAError): """General OutputProcessor error.""" class ResourceError(WAError): """General Resolver error.""" class CommandError(WAError): """Raised by commands when they have encountered an error condition during execution.""" class ToolError(WAError): """Raised by tools when they have encountered an error condition during execution.""" class ConfigError(WAError): """Raised when configuration provided is invalid. This error suggests that the user should modify their config and try again.""" class SerializerSyntaxError(Exception): """ Error loading a serialized structure from/to a file handle. 
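For example (a sketch -- the underlying parser supplies the message and
position), a YAML agenda with bad indentation might produce ::

    Syntax Error on line 3: mapping values are not allowed here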
""" @property def message(self): if self.args: return self.args[0] return '' def __init__(self, message, line=None, column=None): super(SerializerSyntaxError, self).__init__(message) self.line = line self.column = column def __str__(self): linestring = ' on line {}'.format(self.line) if self.line else '' colstring = ' in column {}'.format(self.column) if self.column else '' message = 'Syntax Error{}: {}' return message.format(''.join([linestring, colstring]), self.message) class PluginLoaderError(WAError): """Raised when there is an error loading an extension or an external resource. Apart form the usual message, the __init__ takes an exc_info parameter which should be the result of sys.exc_info() for the original exception (if any) that caused the error.""" def __init__(self, message, exc_info=None): super(PluginLoaderError, self).__init__(message) self.exc_info = exc_info def __str__(self): if self.exc_info: orig = self.exc_info[1] orig_name = type(orig).__name__ if isinstance(orig, WAError): reason = 'because of:\n{}: {}'.format(orig_name, orig) else: text = 'because of:\n{}\n{}: {}' reason = text.format(get_traceback(self.exc_info), orig_name, orig) return '\n'.join([self.message, reason]) else: return self.message class WorkerThreadError(WAError): """ This should get raised in the main thread if a non-WAError-derived exception occurs on a worker/background thread. If a WAError-derived exception is raised in the worker, then it that exception should be re-raised on the main thread directly -- the main point of this is to preserve the backtrace in the output, and backtrace doesn't get output for WAErrors. """ def __init__(self, thread, exc_info): self.thread = thread self.exc_info = exc_info orig = self.exc_info[1] orig_name = type(orig).__name__ text = 'Exception of type {} occured on thread {}:\n{}\n{}: {}' message = text.format(orig_name, thread, get_traceback(self.exc_info), orig_name, orig) super(WorkerThreadError, self).__init__(message) ================================================ FILE: wa/framework/execution.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # pylint: disable=no-member import hashlib import logging import os import shutil from copy import copy from datetime import datetime import wa.framework.signal as signal from wa.framework import instrument as instrumentation from wa.framework.configuration.core import Status from wa.framework.exception import TargetError, HostError, WorkloadError, ExecutionError from wa.framework.exception import TargetNotRespondingError, TimeoutError # pylint: disable=redefined-builtin from wa.framework.job import Job from wa.framework.output import init_job_output from wa.framework.output_processor import ProcessorManager from wa.framework.resource import ResourceResolver from wa.framework.target.manager import TargetManager from wa.utils import log from wa.utils.misc import merge_config_values, format_duration class ExecutionContext(object): @property def previous_job(self): if not self.completed_jobs: return None return self.completed_jobs[-1] @property def next_job(self): if not self.job_queue: return None return self.job_queue[0] @property def spec_changed(self): if self.previous_job is None and self.current_job is not None: # Start of run return True if self.previous_job is not None and self.current_job is None: # End of run return True return self.current_job.spec.id != self.previous_job.spec.id @property def spec_will_change(self): if self.current_job is None and self.next_job is not None: # Start of run return True if self.current_job is not None and self.next_job is None: # End of run return True return self.current_job.spec.id != self.next_job.spec.id @property def workload(self): if self.current_job: return self.current_job.workload @property def job_output(self): if self.current_job: return self.current_job.output @property def output(self): if self.current_job: return self.job_output return self.run_output @property def output_directory(self): return self.output.basepath @property def reboot_policy(self): return self.cm.run_config.reboot_policy @property def target_info(self): return self.run_output.target_info def __init__(self, cm, tm, output): self.logger = logging.getLogger('context') self.cm = cm self.tm = tm self.run_output = output self.run_state = output.state self.job_queue = None self.completed_jobs = None self.current_job = None self.successful_jobs = 0 self.failed_jobs = 0 self.run_interrupted = False self._load_resource_getters() def start_run(self): self.output.info.start_time = datetime.utcnow() self.output.write_info() self.job_queue = copy(self.cm.jobs) self.completed_jobs = [] self.run_state.status = Status.STARTED self.output.status = Status.STARTED self.output.write_state() def end_run(self): if self.successful_jobs: if self.failed_jobs: status = Status.PARTIAL else: status = Status.OK else: status = Status.FAILED self.run_state.status = status self.run_output.status = status self.run_output.info.end_time = datetime.utcnow() self.run_output.info.duration = (self.run_output.info.end_time - self.run_output.info.start_time) self.write_output() def finalize(self): self.tm.finalize() def start_job(self): if not self.job_queue: raise RuntimeError('No jobs to run') self.current_job = self.job_queue.pop(0) job_output = init_job_output(self.run_output, self.current_job) self.current_job.set_output(job_output) return self.current_job def end_job(self): if not self.current_job: raise RuntimeError('No jobs in progress') self.completed_jobs.append(self.current_job) self.output.write_result() self.current_job = None def set_status(self, status, force=False, write=True): if not 
self.current_job: raise RuntimeError('No jobs in progress') self.set_job_status(self.current_job, status, force, write) def set_job_status(self, job, status, force=False, write=True): job.set_status(status, force) if write: self.run_output.write_state() def extract_results(self): self.tm.extract_results(self) def move_failed(self, job): self.run_output.move_failed(job.output) def skip_job(self, job): self.set_job_status(job, Status.SKIPPED, force=True) self.completed_jobs.append(job) def skip_remaining_jobs(self): while self.job_queue: job = self.job_queue.pop(0) self.skip_job(job) self.write_state() def write_config(self): self.run_output.write_config(self.cm.get_config()) def write_state(self): self.run_output.write_state() def write_output(self): self.run_output.write_info() self.run_output.write_state() self.run_output.write_result() def write_job_specs(self): self.run_output.write_job_specs(self.cm.job_specs) def add_augmentation(self, aug): self.cm.run_config.add_augmentation(aug) def get_resource(self, resource, strict=True): result = self.resolver.get(resource, strict) if result is None: return result if os.path.isfile(result): with open(result, 'rb') as fh: md5hash = hashlib.md5(fh.read()) key = '{}/{}'.format(resource.owner, os.path.basename(result)) self.update_metadata('hashes', key, md5hash.hexdigest()) return result get = get_resource # alias to allow a context to act as a resolver def get_metric(self, name): try: return self.output.get_metric(name) except HostError: if not self.current_job: raise return self.run_output.get_metric(name) def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None): if self.current_job: classifiers = merge_config_values(self.current_job.classifiers, classifiers) self.output.add_metric(name, value, units, lower_is_better, classifiers) def get_artifact(self, name): try: return self.output.get_artifact(name) except HostError: if not self.current_job: raise return self.run_output.get_artifact(name) def get_artifact_path(self, name): try: return self.output.get_artifact_path(name) except HostError: if not self.current_job: raise return self.run_output.get_artifact_path(name) def add_artifact(self, name, path, kind, description=None, classifiers=None): self.output.add_artifact(name, path, kind, description, classifiers) def add_run_artifact(self, name, path, kind, description=None, classifiers=None): self.run_output.add_artifact(name, path, kind, description, classifiers) def add_event(self, message): self.output.add_event(message) def add_classifier(self, name, value, overwrite=False): self.output.add_classifier(name, value, overwrite) if self.current_job: self.current_job.add_classifier(name, value, overwrite) def add_metadata(self, key, *args, **kwargs): self.output.add_metadata(key, *args, **kwargs) def update_metadata(self, key, *args): self.output.update_metadata(key, *args) def take_screenshot(self, filename): filepath = self._get_unique_filepath(filename) self.tm.target.capture_screen(filepath) if os.path.isfile(filepath): self.add_artifact('screenshot', filepath, kind='log') def take_uiautomator_dump(self, filename): filepath = self._get_unique_filepath(filename) self.tm.target.capture_ui_hierarchy(filepath) self.add_artifact('uitree', filepath, kind='log') def record_ui_state(self, basename): self.logger.info('Recording screen state...') self.take_screenshot('{}.png'.format(basename)) target = self.tm.target if target.os == 'android' or\ (target.os == 'chromeos' and target.has('android_container')): 
self.take_uiautomator_dump('{}.uix'.format(basename)) def initialize_jobs(self): new_queue = [] failed_ids = [] for job in self.job_queue: if job.id in failed_ids: # Don't try to initialize a job if another job with the same ID # (i.e. same job spec) has failed - we can assume it will fail # too. self.skip_job(job) continue try: job.initialize(self) except WorkloadError as e: self.set_job_status(job, Status.FAILED, write=False) log.log_error(e, self.logger) failed_ids.append(job.id) if self.cm.run_config.bail_on_init_failure: raise else: new_queue.append(job) self.job_queue = new_queue self.write_state() def _load_resource_getters(self): self.logger.debug('Loading resource discoverers') self.resolver = ResourceResolver(self.cm.plugin_cache) self.resolver.load() for getter in self.resolver.getters: self.cm.run_config.add_resource_getter(getter) def _get_unique_filepath(self, filename): filepath = os.path.join(self.output_directory, filename) rest, ext = os.path.splitext(filepath) i = 1 new_filepath = '{}-{}{}'.format(rest, i, ext) if not os.path.exists(filepath) and not os.path.exists(new_filepath): return filepath elif not os.path.exists(new_filepath): # new_filepath does not exist, therefore filepath must exist; # this is the first collision shutil.move(filepath, new_filepath) while os.path.exists(new_filepath): i += 1 new_filepath = '{}-{}{}'.format(rest, i, ext) return new_filepath class Executor(object): """ The ``Executor``'s job is to set up the execution context and pass it to a ``Runner`` along with a loaded run specification. Once the ``Runner`` has finished, the ``Executor`` performs some final reporting before returning. The initial context setup involves combining configuration from various sources, loading of required workloads, loading and installation of instruments and output processors, etc. Static validation of the combined configuration is also performed. """ # pylint: disable=R0915 def __init__(self): self.logger = logging.getLogger('executor') self.error_logged = False self.warning_logged = False self.target_manager = None def execute(self, config_manager, output): """ Execute the run specified by an agenda. Optionally, selectors may be used to only execute a subset of the specified agenda. Params:: :config_manager: a ``ConfigManager`` containing processed configuration :output: an initialized ``RunOutput`` that will be used to store the results.
""" signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED) signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED) self.logger.info('Initializing run') self.logger.debug('Finalizing run configuration.') config = config_manager.finalize() output.write_config(config) self.target_manager = TargetManager(config.run_config.device, config.run_config.device_config, output.basepath) self.logger.info('Initializing execution context') context = ExecutionContext(config_manager, self.target_manager, output) try: self.do_execute(context) except KeyboardInterrupt as e: context.run_output.status = Status.ABORTED log.log_error(e, self.logger) context.write_output() raise except Exception as e: context.run_output.status = Status.FAILED log.log_error(e, self.logger) context.write_output() raise finally: context.finalize() self.execute_postamble(context, output) signal.send(signal.RUN_COMPLETED, self, context) def do_execute(self, context): self.logger.info('Connecting to target') context.tm.initialize() if context.cm.run_config.reboot_policy.perform_initial_reboot: self.logger.info('Performing initial reboot.') attempts = context.cm.run_config.max_retries while attempts: try: self.target_manager.reboot(context) except TargetError as e: if attempts: attempts -= 1 else: raise e else: break context.output.set_target_info(self.target_manager.get_target_info()) self.logger.info('Generating jobs') context.cm.generate_jobs(context) context.write_job_specs() context.output.write_state() self.logger.info('Installing instruments') for instrument in context.cm.get_instruments(self.target_manager.target): instrumentation.install(instrument, context) instrumentation.validate() self.logger.info('Installing output processors') pm = ProcessorManager() for proc in context.cm.get_processors(): pm.install(proc, context) pm.validate() context.write_config() self.logger.info('Starting run') runner = Runner(context, pm) signal.send(signal.RUN_STARTED, self, context) runner.run() def execute_postamble(self, context, output): self.logger.info('Done.') duration = format_duration(output.info.duration) self.logger.info('Run duration: {}'.format(duration)) num_ran = context.run_state.num_completed_jobs status_summary = 'Ran a total of {} iterations: '.format(num_ran) counter = context.run_state.get_status_counts() parts = [] for status in reversed(Status.levels): if status in counter: parts.append('{} {}'.format(counter[status], status)) self.logger.info('{}{}'.format(status_summary, ', '.join(parts))) self.logger.info('Results can be found in {}'.format(output.basepath)) if self.error_logged: self.logger.warning('There were errors during execution.') self.logger.warning('Please see {}'.format(output.logfile)) elif self.warning_logged: self.logger.warning('There were warnings during execution.') self.logger.warning('Please see {}'.format(output.logfile)) def _error_signalled_callback(self, _): self.error_logged = True signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED) def _warning_signalled_callback(self, _): self.warning_logged = True signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED) def __str__(self): return 'executor' __repr__ = __str__ class Runner(object): """ Triggers running jobs and processing results Takes pre-initialized ExcecutionContext and ProcessorManager. Handles actually running the jobs, and triggers the ProcessorManager to handle processing job and run results. 
""" def __init__(self, context, pm): self.logger = logging.getLogger('runner') self.context = context self.pm = pm self.output = self.context.output self.config = self.context.cm def run(self): try: self.initialize_run() self.send(signal.RUN_INITIALIZED) with signal.wrap('JOB_QUEUE_EXECUTION', self, self.context): while self.context.job_queue: if self.context.run_interrupted: raise KeyboardInterrupt() self.run_next_job(self.context) except KeyboardInterrupt as e: log.log_error(e, self.logger) self.logger.info('Skipping remaining jobs.') self.context.skip_remaining_jobs() except Exception as e: message = e.args[0] if e.args else str(e) log.log_error(e, self.logger) self.logger.error('Skipping remaining jobs due to "{}".'.format(message)) self.context.skip_remaining_jobs() raise e finally: self.finalize_run() self.send(signal.RUN_FINALIZED) def initialize_run(self): self.logger.info('Initializing run') signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED) signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED) self.context.start_run() self.pm.initialize(self.context) with log.indentcontext(): self.context.initialize_jobs() self.context.write_state() def finalize_run(self): self.logger.info('Run completed') with log.indentcontext(): for job in self.context.completed_jobs: job.finalize(self.context) self.logger.info('Finalizing run') self.context.end_run() instrumentation.enable_all() self.pm.enable_all() with signal.wrap('RUN_OUTPUT_PROCESSED', self): self.pm.process_run_output(self.context) self.pm.export_run_output(self.context) self.pm.finalize(self.context) if self.context.reboot_policy.reboot_on_run_completion: self.logger.info('Rebooting target on run completion.') self.context.tm.reboot(self.context) signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED) signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED) def run_next_job(self, context): job = context.start_job() self.logger.info('Running job {}'.format(job.id)) try: log.indent() if self.context.reboot_policy.reboot_on_each_job: self.logger.info('Rebooting on new job.') self.context.tm.reboot(context) elif self.context.reboot_policy.reboot_on_each_spec and context.spec_changed: self.logger.info('Rebooting on new spec.') self.context.tm.reboot(context) with signal.wrap('JOB', self, context): context.tm.start() self.do_run_job(job, context) context.set_job_status(job, Status.OK) except (Exception, KeyboardInterrupt) as e: # pylint: disable=broad-except log.log_error(e, self.logger) if isinstance(e, KeyboardInterrupt): context.run_interrupted = True context.set_job_status(job, Status.ABORTED) raise e else: context.set_job_status(job, Status.FAILED) if isinstance(e, TargetNotRespondingError): raise e elif isinstance(e, TargetError): context.tm.verify_target_responsive(context) finally: self.logger.info('Completing job {}'.format(job.id)) self.send(signal.JOB_COMPLETED) context.tm.stop() context.end_job() log.dedent() self.check_job(job) def do_run_job(self, job, context): # pylint: disable=too-many-branches,too-many-statements rc = self.context.cm.run_config if job.workload.phones_home and not rc.allow_phone_home: self.logger.warning('Skipping job {} ({}) due to allow_phone_home=False' .format(job.id, job.workload.name)) self.context.skip_job(job) return context.set_job_status(job, Status.RUNNING) self.send(signal.JOB_STARTED) job.configure_augmentations(context, self.pm) with signal.wrap('JOB_TARGET_CONFIG', self, context): job.configure_target(context) try: 
job.setup(context) except Exception as e: context.set_job_status(job, Status.FAILED) log.log_error(e, self.logger) if isinstance(e, (TargetError, TimeoutError)): context.tm.verify_target_responsive(context) self.context.record_ui_state('setup-error') raise e try: try: job.run(context) except KeyboardInterrupt: context.run_interrupted = True context.set_job_status(job, Status.ABORTED) raise except Exception as e: context.set_job_status(job, Status.FAILED) log.log_error(e, self.logger) if isinstance(e, (TargetError, TimeoutError)): context.tm.verify_target_responsive(context) self.context.record_ui_state('run-error') raise e finally: try: with signal.wrap('JOB_OUTPUT_PROCESSED', self, context): job.process_output(context) self.pm.process_job_output(context) self.pm.export_job_output(context) except Exception as e: context.set_job_status(job, Status.PARTIAL) if isinstance(e, (TargetError, TimeoutError)): context.tm.verify_target_responsive(context) self.context.record_ui_state('output-error') raise except KeyboardInterrupt: context.run_interrupted = True context.set_status(Status.ABORTED) raise finally: # If setup was successfully completed, teardown must # run even if the job failed job.teardown(context) def check_job(self, job): rc = self.context.cm.run_config if job.status in rc.retry_on_status: if job.retries < rc.max_retries: msg = 'Job {} iteration {} completed with status {}. retrying...' self.logger.error(msg.format(job.id, job.iteration, job.status)) self.retry_job(job) self.context.move_failed(job) self.context.write_state() else: msg = 'Job {} iteration {} completed with status {}. '\ 'Max retries exceeded.' self.logger.error(msg.format(job.id, job.iteration, job.status)) self.context.failed_jobs += 1 self.send(signal.JOB_FAILED) if rc.bail_on_job_failure: raise ExecutionError('Job {} failed, bailing.'.format(job.id)) else: # status not in retry_on_status self.logger.info('Job completed with status {}'.format(job.status)) if job.status != 'ABORTED': self.context.successful_jobs += 1 else: self.context.failed_jobs += 1 self.send(signal.JOB_ABORTED) def retry_job(self, job): retry_job = Job(job.spec, job.iteration, self.context) retry_job.workload = job.workload retry_job.state = job.state retry_job.retries = job.retries + 1 self.context.set_job_status(retry_job, Status.PENDING, force=True) self.context.job_queue.insert(0, retry_job) self.send(signal.JOB_RESTARTED) def send(self, s): signal.send(s, self, self.context) def _error_signalled_callback(self, record): self.context.add_event(record.getMessage()) def _warning_signalled_callback(self, record): self.context.add_event(record.getMessage()) def __str__(self): return 'runner' ================================================ FILE: wa/framework/getters.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module contains the standard set of resource getters used by Workload Automation. 
""" import http.client import json import logging import os import shutil import sys import requests from wa import Parameter, settings, __file__ as _base_filepath from wa.framework.resource import ResourceGetter, SourcePriority, NO_ONE from wa.framework.exception import ResourceError from wa.utils.misc import (ensure_directory_exists as _d, atomic_write_path, ensure_file_directory_exists as _f, sha256, urljoin) from wa.utils.types import boolean, caseless_string # Because of use of Enum (dynamic attrs) # pylint: disable=no-member logging.getLogger("requests").setLevel(logging.WARNING) logging.getLogger("urllib3").setLevel(logging.WARNING) logger = logging.getLogger('resource') def get_by_extension(path, ext): if not ext.startswith('.'): ext = '.' + ext ext = caseless_string(ext) found = [] for entry in os.listdir(path): entry_ext = os.path.splitext(entry)[1] if entry_ext == ext: found.append(os.path.join(path, entry)) return found def get_generic_resource(resource, files): matches = [] for f in files: if resource.match(f): matches.append(f) if not matches: return None if len(matches) > 1: msg = 'Multiple matches for {}: {}' raise ResourceError(msg.format(resource, matches)) return matches[0] def get_path_matches(resource, files): matches = [] for f in files: if resource.match_path(f): matches.append(f) return matches # pylint: disable=too-many-return-statements def get_from_location(basepath, resource): if resource.kind == 'file': path = os.path.join(basepath, resource.path) if os.path.exists(path): return path elif resource.kind == 'executable': bin_dir = os.path.join(basepath, 'bin', resource.abi) if not os.path.exists(bin_dir): return None for entry in os.listdir(bin_dir): path = os.path.join(bin_dir, entry) if resource.match(path): return path elif resource.kind == 'revent': path = os.path.join(basepath, 'revent_files') if os.path.exists(path): files = get_by_extension(path, resource.kind) found_resource = get_generic_resource(resource, files) if found_resource: return found_resource files = get_by_extension(basepath, resource.kind) return get_generic_resource(resource, files) elif resource.kind in ['apk', 'jar']: files = get_by_extension(basepath, resource.kind) return get_generic_resource(resource, files) return None class Package(ResourceGetter): name = 'package' def register(self, resolver): resolver.register(self.get, SourcePriority.package) # pylint: disable=no-self-use def get(self, resource): if resource.owner == NO_ONE: basepath = os.path.join(os.path.dirname(_base_filepath), 'assets') else: modname = resource.owner.__module__ basepath = os.path.dirname(sys.modules[modname].__file__) return get_from_location(basepath, resource) class UserDirectory(ResourceGetter): name = 'user' def register(self, resolver): resolver.register(self.get, SourcePriority.local) # pylint: disable=no-self-use def get(self, resource): basepath = settings.dependencies_directory directory = _d(os.path.join(basepath, resource.owner.name)) return get_from_location(directory, resource) class Http(ResourceGetter): name = 'http' description = """ Downloads resources from a server based on an index fetched from the specified URL. Given a URL, this will try to fetch ``/index.json``. The index file maps extension names to a list of corresponing asset descriptons. Each asset description continas a path (relative to the base URL) of the resource and a SHA256 hash, so that this Getter can verify whether the resource on the remote has changed. 
For example, let's assume we want to get the APK file for workload "foo", and that assets are hosted at ``http://example.com/assets``. This Getter will first try to download ``http://example.com/assets/index.json``. The index file may contain something like :: { "foo": [ { "path": "foo-app.apk", "sha256": "b14530bb47e04ed655ac5e80e69beaa61c2020450e18638f54384332dffebe86" }, { "path": "subdir/some-other-asset.file", "sha256": "48d9050e9802246d820625717b72f1c2ba431904b8484ca39befd68d1dbedfff" } ] } This Getter will look through the list of assets for "foo" (in this case, two) and check the paths until it finds one matching the resource (in this case, "foo-app.apk"). Finally, it will try to download that file relative to the base URL and extension name (in this case, "http://example.com/assets/foo/foo-app.apk"). The downloaded version will be cached locally, so that in the future, the getter will check the SHA256 hash of the local file against the one advertised inside index.json, and provided that hasn't changed, it won't try to download the file again. """ parameters = [ Parameter('url', global_alias='remote_assets_url', description=""" URL of the index file for assets on an HTTP server. """), Parameter('username', description=""" User name for authenticating with assets URL """), Parameter('password', description=""" Password for authenticating with assets URL """), Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets', description=""" If ``True``, will always attempt to fetch assets from the remote, even if a local cached copy is available. """), Parameter('chunk_size', kind=int, default=1024, description=""" Chunk size for streaming large assets. """), ] def __init__(self, **kwargs): super(Http, self).__init__(**kwargs) self.logger = logger self.index = {} def register(self, resolver): resolver.register(self.get, SourcePriority.remote) def get(self, resource): if not resource.owner: return # TODO: add support for unowned resources if not self.index: try: self.index = self.fetch_index() except requests.exceptions.RequestException as e: msg = 'Skipping HTTP getter due to connection error: {}' self.logger.debug(msg.format(e)) return if resource.kind == 'apk': # APKs must always be downloaded to run ApkInfo for version # information.
return self.resolve_apk(resource) else: asset = self.resolve_resource(resource) if not asset: return return self.download_asset(asset, resource.owner.name) def fetch_index(self): if not self.url: return {} index_url = urljoin(self.url, 'index.json') response = self.geturl(index_url) if response.status_code != http.client.OK: message = 'Could not fetch "{}"; received "{} {}"' self.logger.error(message.format(index_url, response.status_code, response.reason)) return {} content = response.content.decode('utf-8') return json.loads(content) def download_asset(self, asset, owner_name): url = urljoin(self.url, owner_name, asset['path']) local_path = _f(os.path.join(settings.dependencies_directory, '__remote', owner_name, asset['path'].replace('/', os.sep))) if os.path.exists(local_path) and not self.always_fetch: local_sha = sha256(local_path) if local_sha == asset['sha256']: self.logger.debug('Local SHA256 matches; not re-downloading') return local_path self.logger.debug('Downloading {}'.format(url)) response = self.geturl(url, stream=True) if response.status_code != http.client.OK: message = 'Could not download asset "{}"; received "{} {}"' self.logger.warning(message.format(url, response.status_code, response.reason)) return with atomic_write_path(local_path) as at_path: with open(at_path, 'wb') as wfh: for chunk in response.iter_content(chunk_size=self.chunk_size): wfh.write(chunk) return local_path def geturl(self, url, stream=False): if self.username: auth = (self.username, self.password) else: auth = None return requests.get(url, auth=auth, stream=stream) def resolve_apk(self, resource): assets = self.index.get(resource.owner.name, {}) if not assets: return None asset_map = {a['path']: a for a in assets} paths = get_path_matches(resource, list(asset_map.keys())) local_paths = [] for path in paths: local_paths.append(self.download_asset(asset_map[path], resource.owner.name)) for path in local_paths: if resource.match(path): return path def resolve_resource(self, resource): # pylint: disable=too-many-branches,too-many-locals assets = self.index.get(resource.owner.name, {}) if not assets: return {} asset_map = {a['path']: a for a in assets} if resource.kind in ['jar', 'revent']: path = get_generic_resource(resource, list(asset_map.keys())) if path: return asset_map[path] elif resource.kind == 'executable': path = '/'.join(['bin', resource.abi, resource.filename]) for asset in assets: if asset['path'].lower() == path.lower(): return asset else: # file for asset in assets: if asset['path'].lower() == resource.path.lower(): return asset class Filer(ResourceGetter): name = 'filer' description = """ Finds resources on a (locally mounted) remote filer and caches them locally. This assumes that the filer is mounted on the local machine (e.g. as a samba share). """ parameters = [ Parameter('remote_path', global_alias='remote_assets_path', default=settings.assets_repository, description=""" Path, on the local system, where the assets are located. """), Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets', description=""" If ``True``, will always attempt to fetch assets from the remote, even if a local cached copy is available. 
"""), ] def register(self, resolver): resolver.register(self.get, SourcePriority.lan) def get(self, resource): if resource.owner: remote_path = os.path.join(self.remote_path, resource.owner.name) local_path = os.path.join(settings.dependencies_directory, '__filer', resource.owner.dependencies_directory) return self.try_get_resource(resource, remote_path, local_path) else: # No owner result = None for entry in os.listdir(remote_path): remote_path = os.path.join(self.remote_path, entry) local_path = os.path.join(settings.dependencies_directory, '__filer', settings.dependencies_directory, entry) result = self.try_get_resource(resource, remote_path, local_path) if result: break return result def try_get_resource(self, resource, remote_path, local_path): if not self.always_fetch: result = get_from_location(local_path, resource) if result: return result if not os.path.exists(local_path): return None if os.path.exists(remote_path): # Didn't find it cached locally; now check the remoted result = get_from_location(remote_path, resource) if not result: return result else: # remote path is not set return None # Found it remotely, cache locally, then return it local_full_path = os.path.join(_d(local_path), os.path.basename(result)) self.logger.debug('cp {} {}'.format(result, local_full_path)) shutil.copy(result, local_full_path) return result ================================================ FILE: wa/framework/host.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import shutil from wa.framework import pluginloader from wa.framework.configuration.core import (settings, ConfigurationPoint, MetaConfiguration, RunConfiguration) from wa.framework.configuration.default import (generate_default_config, write_param_yaml) from wa.framework.configuration.plugin_cache import PluginCache from wa.utils.misc import load_struct_from_python from wa.utils.serializer import yaml from wa.utils.types import identifier # Have to disable this due to dynamic attributes # pylint: disable=no-member def init_user_directory(overwrite_existing=False): # pylint: disable=R0914 """ Initialise a fresh user directory. """ if os.path.exists(settings.user_directory): if not overwrite_existing: raise RuntimeError('Environment {} already exists.'.format(settings.user_directory)) shutil.rmtree(settings.user_directory) os.makedirs(settings.user_directory) os.makedirs(settings.dependencies_directory) os.makedirs(settings.plugins_directory) os.makedirs(settings.cache_directory) generate_default_config(os.path.join(settings.user_directory, 'config.yaml')) if os.getenv('USER') == 'root': # If running with sudo on POSIX, change the ownership to the real user. 
real_user = os.getenv('SUDO_USER') if real_user: # pylint: disable=import-outside-toplevel import pwd # done here as module won't import on win32 user_entry = pwd.getpwnam(real_user) uid, gid = user_entry.pw_uid, user_entry.pw_gid os.chown(settings.user_directory, uid, gid) # why, oh why isn't there a recursive=True option for os.chown? for root, dirs, files in os.walk(settings.user_directory): for d in dirs: os.chown(os.path.join(root, d), uid, gid) for f in files: os.chown(os.path.join(root, f), uid, gid) def init_config(): """ If the configuration file is missing, try to convert a WA2 config if present; otherwise initialize a fresh config file. """ wa2_config_file = os.path.join(settings.user_directory, 'config.py') wa3_config_file = os.path.join(settings.user_directory, 'config.yaml') if os.path.exists(wa2_config_file): convert_wa2_agenda(wa2_config_file, wa3_config_file) else: generate_default_config(wa3_config_file) def convert_wa2_agenda(filepath, output_path): """ Convert WA2 .py config file to a WA3 .yaml config file. """ orig_agenda = load_struct_from_python(filepath) new_agenda = {'augmentations': []} config_points = MetaConfiguration.config_points + RunConfiguration.config_points # Add additional config points to extract from config file. # Also allows for aliasing of renamed parameters config_points.extend([ ConfigurationPoint( 'augmentations', aliases=["instruments", "processors", "instrumentation", "output_processors", "augment", "result_processors"], description=''' The augmentations enabled by default. This combines the "instrumentation" and "result_processors" from previous versions of WA (the old entries are now aliases for this). '''), ConfigurationPoint( 'device_config', description='''Generic configuration for device.''', default={}), ConfigurationPoint( 'cleanup_assets', aliases=['clean_up'], description='''Specify whether to clean up assets deployed to the target''', default=True), ]) for param in list(orig_agenda.keys()): for cfg_point in config_points: if param == cfg_point.name or param in cfg_point.aliases: if cfg_point.name == 'augmentations': new_agenda['augmentations'].extend(orig_agenda.pop(param)) else: new_agenda[cfg_point.name] = format_parameter(orig_agenda.pop(param)) with open(output_path, 'w') as output: for param in config_points: entry = {param.name: new_agenda.get(param.name, param.default)} write_param_yaml(entry, param, output) # Convert plugin configuration output.write("# Plugin Configuration\n") for param in list(orig_agenda.keys()): if pluginloader.has_plugin(param): entry = {param: orig_agenda.pop(param)} yaml.dump(format_parameter(entry), output, default_flow_style=False) output.write("\n") # Write any additional aliased parameters into new config plugin_cache = PluginCache() output.write("# Additional global aliases\n") for param in list(orig_agenda.keys()): if plugin_cache.is_global_alias(param): entry = {param: orig_agenda.pop(param)} yaml.dump(format_parameter(entry), output, default_flow_style=False) output.write("\n") def format_parameter(param): if isinstance(param, dict): return {identifier(k): v for k, v in param.items()} else: return param ================================================ FILE: wa/framework/instrument.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Adding New Instrument ===================== Any new instrument should be a subclass of Instrument and it must have a name. When a new instrument is added to Workload Automation, the methods of the new instrument will be found automatically and hooked up to the supported signals. Once a signal is broadcast, the corresponding registered method is invoked. Each method in Instrument must take two arguments, which are self and context. Supported signals can be found in [... link to signals ...] To make implementations easier and more uniform, the basic steps to add a new instrument are similar to the steps to add a new workload. Hence, implementing the following methods is sufficient to add a new instrument: - setup: This method is invoked after the workload is set up. All the necessary setup should go inside this method. Setup includes operations such as pushing files to the target device, installing them, clearing logs, etc. - start: It is invoked just before the workload starts execution. This is where the instrument's measurements start being registered/taken. - stop: It is invoked just after the workload execution stops. This is where the measurements should stop being taken/registered. - update_output: It is invoked after the workload has updated its result. update_output is where the taken measurements are added to the output so they can be processed by Workload Automation. - teardown: It is invoked after the workload is torn down. It is a good place to clean up any logs generated by the instrument. For example, to add an instrument which will trace device errors, we subclass Instrument and override the name variable:: #BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace') class TraceErrorsInstrument(Instrument): name = 'trace-errors' def __init__(self, device): super(TraceErrorsInstrument, self).__init__(device) self.trace_on_device = os.path.join(self.device.working_directory, 'trace') We then declare and implement the aforementioned methods. For the setup method, we want to push the file to the target device and then change the file mode to 755 :: def setup(self, context): self.device.push(BINARY_FILE, self.device.working_directory) self.device.execute('chmod 755 {}'.format(self.trace_on_device)) Then we implement the start method, which will simply run the file to start tracing. :: def start(self, context): self.device.execute('{} start'.format(self.trace_on_device)) Lastly, we need to stop tracing once the workload stops and this happens in the stop method:: def stop(self, context): self.device.execute('{} stop'.format(self.trace_on_device)) The generated output can be updated inside update_output or, if it is a trace, we can just pull the file to the host. context has an output variable which has an add_metric method. It can be used to add the instrument's result metrics to the final result for the workload. The method can be passed four parameters: the metric key, value, units, and lower_is_better, which is a boolean.
:: def update_output(self, context): # pull the trace file from the device result = os.path.join(self.device.working_directory, 'trace.txt') self.device.pull(result, context.working_directory) # parse the file if it needs to be parsed, or add the result to # context.result At the end, we might want to delete any files generated by the instruments and the code to clear these files goes in the teardown method. :: def teardown(self, context): self.device.remove(os.path.join(self.device.working_directory, 'trace.txt')) """ import logging import inspect from collections import OrderedDict from wa.framework import signal from wa.framework.plugin import TargetedPlugin from wa.framework.exception import (TargetNotRespondingError, TimeoutError, # pylint: disable=redefined-builtin WorkloadError, TargetError) from wa.utils.log import log_error from wa.utils.misc import isiterable from wa.utils.types import identifier, level logger = logging.getLogger('instruments') # Maps method names onto signals they should be registered to. # Note: the begin/end signals are paired -- if a begin_ signal is sent, # then the corresponding end_ signal is guaranteed to also be sent. # Note: using OrderedDict to preserve logical ordering for the table generated # in the documentation SIGNAL_MAP = OrderedDict([ # Below are "aliases" for some of the more common signals to allow # instruments to have similar structure to workloads ('initialize', signal.RUN_INITIALIZED), ('setup', signal.BEFORE_WORKLOAD_SETUP), ('start', signal.BEFORE_WORKLOAD_EXECUTION), ('stop', signal.AFTER_WORKLOAD_EXECUTION), ('process_workload_output', signal.SUCCESSFUL_WORKLOAD_OUTPUT_UPDATE), ('update_output', signal.AFTER_WORKLOAD_OUTPUT_UPDATE), ('teardown', signal.AFTER_WORKLOAD_TEARDOWN), ('finalize', signal.RUN_FINALIZED), ('on_run_start', signal.RUN_STARTED), ('on_run_end', signal.RUN_COMPLETED), ('on_job_start', signal.JOB_STARTED), ('on_job_restart', signal.JOB_RESTARTED), ('on_job_end', signal.JOB_COMPLETED), ('on_job_failure', signal.JOB_FAILED), ('on_job_abort', signal.JOB_ABORTED), ('before_job_queue_execution', signal.BEFORE_JOB_QUEUE_EXECUTION), ('on_successful_job_queue_exection', signal.SUCCESSFUL_JOB_QUEUE_EXECUTION), ('after_job_queue_execution', signal.AFTER_JOB_QUEUE_EXECUTION), ('before_job', signal.BEFORE_JOB), ('on_successful_job', signal.SUCCESSFUL_JOB), ('after_job', signal.AFTER_JOB), ('before_processing_job_output', signal.BEFORE_JOB_OUTPUT_PROCESSED), ('on_successfully_processing_job', signal.SUCCESSFUL_JOB_OUTPUT_PROCESSED), ('after_processing_job_output', signal.AFTER_JOB_OUTPUT_PROCESSED), ('before_reboot', signal.BEFORE_REBOOT), ('on_successful_reboot', signal.SUCCESSFUL_REBOOT), ('after_reboot', signal.AFTER_REBOOT), ('on_error', signal.ERROR_LOGGED), ('on_warning', signal.WARNING_LOGGED), ]) def get_priority(func): return getattr(getattr(func, 'im_func', func), 'priority', signal.CallbackPriority.normal) def priority(priority): # pylint: disable=redefined-outer-name def decorate(func): def wrapper(*args, **kwargs): return func(*args, **kwargs) wrapper.__name__ = func.__name__ if priority in signal.CallbackPriority.levels: wrapper.priority = signal.CallbackPriority(priority) else: if not isinstance(priority, int): msg = 'Invalid priority "{}"; must be an int or one of {}' raise ValueError(msg.format(priority, signal.CallbackPriority.values)) wrapper.priority = level('custom', priority) return wrapper return decorate extremely_slow = priority(signal.CallbackPriority.extremely_low) very_slow = 
priority(signal.CallbackPriority.very_low) slow = priority(signal.CallbackPriority.low) normal = priority(signal.CallbackPriority.normal) fast = priority(signal.CallbackPriority.high) very_fast = priority(signal.CallbackPriority.very_high) extremely_fast = priority(signal.CallbackPriority.extremely_high) def hostside(func): """ Used as a hint that the callback only performs actions on the host and does not rely on an active connection to the target. This means the callback will be invoked even if the target is thought to be unresponsive. """ func.is_hostside = True return func def is_hostside(func): return getattr(func, 'is_hostside', False) installed = [] def is_installed(instrument): if isinstance(instrument, Instrument): if instrument in installed: return True if instrument.name in [i.name for i in installed]: return True elif isinstance(instrument, type): if instrument in [i.__class__ for i in installed]: return True else: # assume string if identifier(instrument) in [identifier(i.name) for i in installed]: return True return False def is_enabled(instrument): if isinstance(instrument, (Instrument, type)): name = instrument.name else: # assume string name = instrument try: installed_instrument = get_instrument(name) return installed_instrument.is_enabled except ValueError: return False failures_detected = False def reset_failures(): global failures_detected # pylint: disable=W0603 failures_detected = False def check_failures(): result = failures_detected reset_failures() return result class ManagedCallback(object): """ This wraps instruments' callbacks to ensure that errors do not interfere with run execution. """ def __init__(self, instrument, callback): self.instrument = instrument self.callback = callback self.is_hostside = is_hostside(callback) def __call__(self, context): if self.instrument.is_enabled: try: if not context.tm.is_responsive and not self.is_hostside: logger.debug("Target unresponsive; skipping callback {}".format(self.callback)) return self.callback(context) except (KeyboardInterrupt, TargetNotRespondingError, TimeoutError): # pylint: disable=W0703 raise except Exception as e: # pylint: disable=W0703 logger.error('Error in instrument {}'.format(self.instrument.name)) global failures_detected # pylint: disable=W0603 failures_detected = True log_error(e, logger) context.add_event(e.args[0] if e.args else str(e)) if isinstance(e, WorkloadError): context.set_status('FAILED') elif isinstance(e, (TargetError, TimeoutError)): context.tm.verify_target_responsive(context) else: if context.current_job: context.set_status('PARTIAL') else: raise def __repr__(self): text = 'ManagedCallback({}, {})' return text.format(self.instrument.name, self.callback.__func__.__name__) __str__ = __repr__ # Need this to keep track of callbacks, because the dispatcher only keeps # weak references, so if the callbacks aren't referenced elsewhere, they will # be deallocated before they've had a chance to be invoked. _callbacks = [] def install(instrument, context): """ This will look for methods (or any callable members) with specific names in the instrument and hook them up to the corresponding signals. :param instrument: Instrument instance to install. """ # pylint: disable=redefined-outer-name logger.debug('Installing instrument %s.', instrument) if is_installed(instrument): msg = 'Instrument {} is already installed.'
raise ValueError(msg.format(instrument.name)) for attr_name in dir(instrument): if attr_name not in SIGNAL_MAP: continue attr = getattr(instrument, attr_name) if not callable(attr): msg = 'Attribute {} not callable in {}.' raise ValueError(msg.format(attr_name, instrument)) argspec = inspect.getfullargspec(attr) arg_num = len(argspec.args) # Instrument callbacks will be passed exactly two arguments: self # (the instrument instance to which the callback is bound) and # context. However, we also allow callbacks to capture the context # in variable arguments (declared as "*args" in the definition). if arg_num > 2 or (arg_num < 2 and argspec.varargs is None): message = '{} must take exactly 2 positional arguments; {} given.' raise ValueError(message.format(attr_name, arg_num)) priority = get_priority(attr) hostside = ' [hostside]' if is_hostside(attr) else '' logger.debug('\tConnecting %s to %s with priority %s(%d)%s', attr.__name__, SIGNAL_MAP[attr_name], priority.name, priority.value, hostside) mc = ManagedCallback(instrument, attr) _callbacks.append(mc) signal.connect(mc, SIGNAL_MAP[attr_name], priority=priority.value) instrument.logger.context = context installed.append(instrument) context.add_augmentation(instrument) def uninstall(instrument): instrument = get_instrument(instrument) installed.remove(instrument) def validate(): for instrument in installed: instrument.validate() def get_instrument(inst): if isinstance(inst, Instrument): return inst for installed_inst in installed: if identifier(installed_inst.name) == identifier(inst): return installed_inst raise ValueError('Instrument {} is not installed'.format(inst)) def disable_all(): for instrument in installed: _disable_instrument(instrument) def enable_all(): for instrument in installed: _enable_instrument(instrument) def enable(to_enable): if isiterable(to_enable): for inst in to_enable: _enable_instrument(inst) else: _enable_instrument(to_enable) def disable(to_disable): if isiterable(to_disable): for inst in to_disable: _disable_instrument(inst) else: _disable_instrument(to_disable) def _enable_instrument(inst): inst = get_instrument(inst) if not inst.is_broken: logger.debug('Enabling instrument {}'.format(inst.name)) inst.is_enabled = True else: logger.debug('Not enabling broken instrument {}'.format(inst.name)) def _disable_instrument(inst): inst = get_instrument(inst) if inst.is_enabled: logger.debug('Disabling instrument {}'.format(inst.name)) inst.is_enabled = False def get_enabled(): return [i for i in installed if i.is_enabled] def get_disabled(): return [i for i in installed if not i.is_enabled] class Instrument(TargetedPlugin): """ Base class for instrument implementations. """ kind = "instrument" def __init__(self, *args, **kwargs): super(Instrument, self).__init__(*args, **kwargs) self.is_enabled = True self.is_broken = False ================================================ FILE: wa/framework/job.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
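# Editorial sketch (added for illustration; not part of the original source):
# a minimal Instrument wired up through the method names in SIGNAL_MAP in
# wa/framework/instrument.py above, with a priority decorator applied. The
# 'df'-based disk usage capture is purely illustrative and is not one of
# WA's shipped instruments.
import os

from wa.framework.instrument import Instrument, very_fast


class ExampleDiskUsageInstrument(Instrument):

    name = 'example-disk-usage'
    description = 'Records target disk usage just before and after workload execution.'

    def setup(self, context):
        # Connected to BEFORE_WORKLOAD_SETUP via SIGNAL_MAP.
        self.before = None
        self.after = None

    @very_fast
    def start(self, context):
        # Connected to BEFORE_WORKLOAD_EXECUTION; the decorator raises this
        # callback's priority so it runs as close to workload start as possible.
        self.before = self.target.execute('df {}'.format(self.target.working_directory))

    @very_fast
    def stop(self, context):
        # Connected to AFTER_WORKLOAD_EXECUTION.
        self.after = self.target.execute('df {}'.format(self.target.working_directory))

    def update_output(self, context):
        # Connected to AFTER_WORKLOAD_OUTPUT_UPDATE: record what was captured
        # as an artifact of the current job.
        outfile = os.path.join(context.output_directory, 'disk-usage.txt')
        with open(outfile, 'w') as wfh:
            wfh.write('before:\n{}\nafter:\n{}\n'.format(self.before, self.after))
        context.add_artifact('disk-usage', outfile, kind='data')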
# # Because of use of Enum (dynamic attrs) # pylint: disable=no-member import logging from copy import copy from datetime import datetime from wa.framework import pluginloader, signal, instrument from wa.framework.configuration.core import Status from wa.utils.log import indentcontext from wa.framework.run import JobState class Job(object): _workload_cache = {} @property def id(self): return self.spec.id @property def label(self): return self.spec.label @property def status(self): return self.state.status @property def has_been_initialized(self): return self._has_been_initialized @property def retries(self): return self.state.retries @status.setter def status(self, value): self.state.status = value self.state.timestamp = datetime.utcnow() if self.output: self.output.status = value @retries.setter def retries(self, value): self.state.retries = value def __init__(self, spec, iteration, context): self.logger = logging.getLogger('job') self.spec = spec self.iteration = iteration self.context = context self.workload = None self.output = None self.run_time = None self.classifiers = copy(self.spec.classifiers) self._has_been_initialized = False self.state = JobState(self.id, self.label, self.iteration, Status.NEW) def load(self, target, loader=pluginloader): self.logger.info('Loading job {}'.format(self)) if self.id not in self._workload_cache: self.workload = loader.get_workload(self.spec.workload_name, target, **self.spec.workload_parameters) self.workload.init_resources(self.context) self.workload.validate() self._workload_cache[self.id] = self.workload else: self.workload = self._workload_cache[self.id] def set_output(self, output): output.classifiers = copy(self.classifiers) self.output = output def initialize(self, context): self.logger.info('Initializing job {}'.format(self)) with indentcontext(): with signal.wrap('WORKLOAD_INITIALIZED', self, context): self.workload.logger.context = context self.workload.initialize(context) self.set_status(Status.PENDING) self._has_been_initialized = True def configure_augmentations(self, context, pm): self.logger.info('Configuring augmentations') with indentcontext(): instruments_to_enable = set() output_processors_to_enable = set() enabled_instruments = set(i.name for i in instrument.get_enabled()) enabled_output_processors = set(p.name for p in pm.get_enabled()) for augmentation in list(self.spec.augmentations.values()): augmentation_cls = context.cm.plugin_cache.get_plugin_class(augmentation) if augmentation_cls.kind == 'instrument': instruments_to_enable.add(augmentation) elif augmentation_cls.kind == 'output_processor': output_processors_to_enable.add(augmentation) # Disable unrequired instruments for instrument_name in enabled_instruments.difference(instruments_to_enable): instrument.disable(instrument_name) # Enable additional instruments for instrument_name in instruments_to_enable.difference(enabled_instruments): instrument.enable(instrument_name) # Disable unrequired output_processors for processor in enabled_output_processors.difference(output_processors_to_enable): pm.disable(processor) # Enable additional output_processors for processor in output_processors_to_enable.difference(enabled_output_processors): pm.enable(processor) def configure_target(self, context): self.logger.info('Configuring target for job {}'.format(self)) with indentcontext(): context.tm.commit_runtime_parameters(self.spec.runtime_parameters) def setup(self, context): self.logger.info('Setting up job {}'.format(self)) with indentcontext(): with 
signal.wrap('WORKLOAD_SETUP', self, context): self.workload.setup(context) def run(self, context): self.logger.info('Running job {}'.format(self)) with indentcontext(): with signal.wrap('WORKLOAD_EXECUTION', self, context): start_time = datetime.utcnow() try: self.workload.run(context) finally: self.run_time = datetime.utcnow() - start_time def process_output(self, context): if not context.tm.is_responsive: self.logger.info('Target unresponsive; not processing job output.') return self.logger.info('Processing output for job {}'.format(self)) with indentcontext(): if self.status != Status.FAILED: with signal.wrap('WORKLOAD_RESULT_EXTRACTION', self, context): self.workload.extract_results(context) context.extract_results() with signal.wrap('WORKLOAD_OUTPUT_UPDATE', self, context): self.workload.update_output(context) def teardown(self, context): if not context.tm.is_responsive: self.logger.info('Target unresponsive; not tearing down.') return self.logger.info('Tearing down job {}'.format(self)) with indentcontext(): with signal.wrap('WORKLOAD_TEARDOWN', self, context): self.workload.teardown(context) def finalize(self, context): if not self._has_been_initialized: return if not context.tm.is_responsive: self.logger.info('Target unresponsive; not finalizing.') return self.logger.info('Finalizing job {} '.format(self)) with indentcontext(): with signal.wrap('WORKLOAD_FINALIZED', self, context): self.workload.finalize(context) def set_status(self, status, force=False): status = Status(status) if force or self.status < status: self.status = status def add_classifier(self, name, value, overwrite=False): if name in self.classifiers and not overwrite: raise ValueError('Cannot overwrite "{}" classifier.'.format(name)) self.classifiers[name] = value def __str__(self): return '{} ({}) [{}]'.format(self.id, self.label, self.iteration) def __repr__(self): return 'Job({})'.format(self) ================================================ FILE: wa/framework/output.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
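# Editorial sketch (added for illustration; not part of the original source):
# programmatically inspecting an existing WA output directory with the
# classes defined in this module. The 'wa_output' path is hypothetical and
# must point at a directory produced by a previous run.
from wa.framework.output import RunOutput


def summarize_run(path):
    # RunOutput validates the directory (it must contain .run_state.json and
    # __meta/run_info.json) and reloads the run and per-job results from disk.
    output = RunOutput(path)
    print('run status: {}'.format(output.status))
    for job in output.jobs:
        print('{} ({}): {}'.format(job.id, job.label, job.status))
        for metric in job.metrics:
            print('  {}: {} {}'.format(metric.name, metric.value,
                                       metric.units or ''))


# Example invocation:
# summarize_run('wa_output')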
# try: import psycopg2 from psycopg2 import Error as Psycopg2Error except ImportError: psycopg2 = None Psycopg2Error = None import logging import os import shutil import tarfile import tempfile from collections import OrderedDict, defaultdict from copy import copy, deepcopy from datetime import datetime from io import StringIO import devlib from wa.framework.configuration.core import JobSpec, Status from wa.framework.configuration.execution import CombinedConfig from wa.framework.exception import HostError, SerializerSyntaxError, ConfigError from wa.framework.run import RunState, RunInfo from wa.framework.target.info import TargetInfo from wa.framework.version import get_wa_version_with_commit from wa.utils.doc import format_simple_table from wa.utils.misc import (touch, ensure_directory_exists, isiterable, format_ordered_dict, safe_extract) from wa.utils.postgres import get_schema_versions from wa.utils.serializer import write_pod, read_pod, Podable, json from wa.utils.types import enum, numeric logger = logging.getLogger('output') class Output(object): kind = None @property def resultfile(self): return os.path.join(self.basepath, 'result.json') @property def event_summary(self): num_events = len(self.events) if num_events: lines = self.events[0].message.split('\n') message = '({} event(s)): {}' if num_events > 1 or len(lines) > 1: message += '[...]' return message.format(num_events, lines[0]) return '' @property def status(self): if self.result is None: return None return self.result.status @status.setter def status(self, value): self.result.status = value @property def metrics(self): if self.result is None: return [] return self.result.metrics @property def artifacts(self): if self.result is None: return [] return self.result.artifacts @property def classifiers(self): if self.result is None: return OrderedDict() return self.result.classifiers @classifiers.setter def classifiers(self, value): if self.result is None: msg = 'Attempting to set classifiers before output has been set' raise RuntimeError(msg) self.result.classifiers = value @property def events(self): if self.result is None: return [] return self.result.events @property def metadata(self): if self.result is None: return {} return self.result.metadata def __init__(self, path): self.basepath = path self.result = None def reload(self): try: if os.path.isdir(self.basepath): pod = read_pod(self.resultfile) self.result = Result.from_pod(pod) else: self.result = Result() self.result.status = Status.PENDING except Exception as e: # pylint: disable=broad-except self.result = Result() self.result.status = Status.UNKNOWN self.add_event(str(e)) def write_result(self): write_pod(self.result.to_pod(), self.resultfile) def get_path(self, subpath): return os.path.join(self.basepath, subpath.strip(os.sep)) def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None): self.result.add_metric(name, value, units, lower_is_better, classifiers) def add_artifact(self, name, path, kind, description=None, classifiers=None): if not os.path.exists(path): path = self.get_path(path) if not os.path.exists(path): msg = 'Attempting to add non-existing artifact: {}' raise HostError(msg.format(path)) is_dir = os.path.isdir(path) path = os.path.relpath(path, self.basepath) self.result.add_artifact(name, path, kind, description, classifiers, is_dir) def add_event(self, message): self.result.add_event(message) def get_metric(self, name): return self.result.get_metric(name) def get_artifact(self, name): return 
self.result.get_artifact(name) def get_artifact_path(self, name): artifact = self.get_artifact(name) return self.get_path(artifact.path) def add_classifier(self, name, value, overwrite=False): self.result.add_classifier(name, value, overwrite) def add_metadata(self, key, *args, **kwargs): self.result.add_metadata(key, *args, **kwargs) def update_metadata(self, key, *args): self.result.update_metadata(key, *args) def __repr__(self): return '<{} {}>'.format(self.__class__.__name__, os.path.basename(self.basepath)) def __str__(self): return os.path.basename(self.basepath) class RunOutputCommon(object): ''' Split out common functionality to form a second base of the RunOutput classes ''' @property def run_config(self): if self._combined_config: return self._combined_config.run_config @property def settings(self): if self._combined_config: return self._combined_config.settings def get_job_spec(self, spec_id): for spec in self.job_specs: if spec.id == spec_id: return spec return None def list_workloads(self): workloads = [] for job in self.jobs: if job.label not in workloads: workloads.append(job.label) return workloads class RunOutput(Output, RunOutputCommon): kind = 'run' @property def logfile(self): return os.path.join(self.basepath, 'run.log') @property def metadir(self): return os.path.join(self.basepath, '__meta') @property def infofile(self): return os.path.join(self.metadir, 'run_info.json') @property def statefile(self): return os.path.join(self.basepath, '.run_state.json') @property def configfile(self): return os.path.join(self.metadir, 'config.json') @property def targetfile(self): return os.path.join(self.metadir, 'target_info.json') @property def jobsfile(self): return os.path.join(self.metadir, 'jobs.json') @property def raw_config_dir(self): return os.path.join(self.metadir, 'raw_config') @property def failed_dir(self): path = os.path.join(self.basepath, '__failed') return ensure_directory_exists(path) @property def augmentations(self): run_augs = set([]) for job in self.jobs: for aug in job.spec.augmentations: run_augs.add(aug) return list(run_augs) def __init__(self, path): super(RunOutput, self).__init__(path) self.info = None self.state = None self.result = None self.target_info = None self._combined_config = None self.jobs = [] self.job_specs = [] if (not os.path.isfile(self.statefile) or not os.path.isfile(self.infofile)): msg = '"{}" does not exist or is not a valid WA output directory.' 
raise ValueError(msg.format(self.basepath)) self.reload() def reload(self): super(RunOutput, self).reload() self.info = RunInfo.from_pod(read_pod(self.infofile)) self.state = RunState.from_pod(read_pod(self.statefile)) if os.path.isfile(self.configfile): self._combined_config = CombinedConfig.from_pod(read_pod(self.configfile)) if os.path.isfile(self.targetfile): self.target_info = TargetInfo.from_pod(read_pod(self.targetfile)) if os.path.isfile(self.jobsfile): self.job_specs = self.read_job_specs() for job_state in self.state.jobs.values(): job_path = os.path.join(self.basepath, job_state.output_name) job = JobOutput(job_path, job_state.id, job_state.label, job_state.iteration, job_state.retries) job.status = job_state.status job.spec = self.get_job_spec(job.id) if job.spec is None: logger.warning('Could not find spec for job {}'.format(job.id)) self.jobs.append(job) def write_info(self): write_pod(self.info.to_pod(), self.infofile) def write_state(self): write_pod(self.state.to_pod(), self.statefile) def write_config(self, config): self._combined_config = config write_pod(config.to_pod(), self.configfile) def read_config(self): if not os.path.isfile(self.configfile): return None return CombinedConfig.from_pod(read_pod(self.configfile)) def set_target_info(self, ti): self.target_info = ti write_pod(ti.to_pod(), self.targetfile) def write_job_specs(self, job_specs): job_specs[0].to_pod() js_pod = {'jobs': [js.to_pod() for js in job_specs]} write_pod(js_pod, self.jobsfile) def read_job_specs(self): if not os.path.isfile(self.jobsfile): return None pod = read_pod(self.jobsfile) return [JobSpec.from_pod(jp) for jp in pod['jobs']] def move_failed(self, job_output): name = os.path.basename(job_output.basepath) attempt = job_output.retry + 1 failed_name = '{}-attempt{:02}'.format(name, attempt) failed_path = os.path.join(self.failed_dir, failed_name) if os.path.exists(failed_path): raise ValueError('Path {} already exists'.format(failed_path)) shutil.move(job_output.basepath, failed_path) job_output.basepath = failed_path class JobOutput(Output): kind = 'job' # pylint: disable=redefined-builtin def __init__(self, path, id, label, iteration, retry): super(JobOutput, self).__init__(path) self.id = id self.label = label self.iteration = iteration self.retry = retry self.result = None self.spec = None self.reload() @property def augmentations(self): job_augs = set([]) for aug in self.spec.augmentations: job_augs.add(aug) return list(job_augs) class Result(Podable): _pod_serialization_version = 1 @staticmethod def from_pod(pod): instance = super(Result, Result).from_pod(pod) instance.status = Status.from_pod(pod['status']) instance.metrics = [Metric.from_pod(m) for m in pod['metrics']] instance.artifacts = [Artifact.from_pod(a) for a in pod['artifacts']] instance.events = [Event.from_pod(e) for e in pod['events']] instance.classifiers = pod.get('classifiers', OrderedDict()) instance.metadata = pod.get('metadata', OrderedDict()) return instance def __init__(self): # pylint: disable=no-member super(Result, self).__init__() self.status = Status.NEW self.metrics = [] self.artifacts = [] self.events = [] self.classifiers = OrderedDict() self.metadata = OrderedDict() def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None): metric = Metric(name, value, units, lower_is_better, classifiers) logger.debug('Adding metric: {}'.format(metric)) self.metrics.append(metric) def add_artifact(self, name, path, kind, description=None, classifiers=None, is_dir=False): artifact = 
Artifact(name, path, kind, description=description, classifiers=classifiers, is_dir=is_dir) logger.debug('Adding artifact: {}'.format(artifact)) self.artifacts.append(artifact) def add_event(self, message): self.events.append(Event(message)) def get_metric(self, name): for metric in self.metrics: if metric.name == name: return metric return None def get_artifact(self, name): for artifact in self.artifacts: if artifact.name == name: return artifact raise HostError('Artifact "{}" not found'.format(name)) def add_classifier(self, name, value, overwrite=False): if name in self.classifiers and not overwrite: raise ValueError('Cannot overwrite "{}" classifier.'.format(name)) self.classifiers[name] = value for metric in self.metrics: if name in metric.classifiers and not overwrite: raise ValueError('Cannot overwrite "{}" classifier; clashes with {}.'.format(name, metric)) metric.classifiers[name] = value for artifact in self.artifacts: if name in artifact.classifiers and not overwrite: raise ValueError('Cannot overwrite "{}" classifier; clashes with {}.'.format(name, artifact)) artifact.classifiers[name] = value def add_metadata(self, key, *args, **kwargs): force = kwargs.pop('force', False) if kwargs: msg = 'Unexpected keyword arguments: {}' raise ValueError(msg.format(kwargs)) if key in self.metadata and not force: msg = 'Metadata with key "{}" already exists.' raise ValueError(msg.format(key)) if len(args) == 1: value = args[0] elif len(args) == 2: value = {args[0]: args[1]} elif not args: value = None else: raise ValueError("Unexpected arguments: {}".format(args)) self.metadata[key] = value def update_metadata(self, key, *args): if not args: del self.metadata[key] return if key not in self.metadata: return self.add_metadata(key, *args) if hasattr(self.metadata[key], 'items'): if len(args) == 2: self.metadata[key][args[0]] = args[1] elif len(args) > 2: # assume list of key-value pairs for k, v in args: self.metadata[key][k] = v elif hasattr(args[0], 'items'): for k, v in args[0].items(): self.metadata[key][k] = v else: raise ValueError('Invalid value for key "{}": {}'.format(key, args)) elif isiterable(self.metadata[key]): self.metadata[key].extend(args) else: # scalar if len(args) > 1: raise ValueError('Invalid value for key "{}": {}'.format(key, args)) self.metadata[key] = args[0] def to_pod(self): pod = super(Result, self).to_pod() pod['status'] = self.status.to_pod() pod['metrics'] = [m.to_pod() for m in self.metrics] pod['artifacts'] = [a.to_pod() for a in self.artifacts] pod['events'] = [e.to_pod() for e in self.events] pod['classifiers'] = copy(self.classifiers) pod['metadata'] = deepcopy(self.metadata) return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) pod['status'] = Status(pod['status']).to_pod() return pod ARTIFACT_TYPES = ['log', 'meta', 'data', 'export', 'raw'] ArtifactType = enum(ARTIFACT_TYPES) class Artifact(Podable): """ This is an artifact generated during execution/post-processing of a workload. Unlike metrics, this represents an actual generated artifact, such as a file. This may be "output", such as trace, or it could be "meta data" such as logs. These are distinguished using the ``kind`` attribute, which also helps WA decide how it should be handled. Currently supported kinds are: :log: A log file. Not part of the "output" as such but contains information about the run/workload execution that may be useful for diagnostics/meta analysis. :meta: A file containing metadata.
This is not part of the "output", but contains information that may be necessary to reproduce the results (contrast with ``log`` artifacts which are *not* necessary). :data: This file contains new data, not available otherwise and should be considered part of the "output" generated by WA. Most traces would fall into this category. :export: Exported version of results or some other artifact. This signifies that this artifact does not contain any new data that is not available elsewhere and that it may be safely discarded without losing information. :raw: Signifies that this is a raw dump/log that is normally processed to extract useful information and is then discarded. In a sense, it is the opposite of ``export``, but in general may also be discarded. .. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on how important it is to preserve this file, e.g. when archiving, vs how much space it takes up. Unlike ``export`` artifacts which are (almost) always ignored by other exporters as that would never result in data loss, ``raw`` files *may* be processed by exporters if they decide that the risk of losing potentially (though unlikely) useful data is greater than the time/space cost of handling the artifact (e.g. a database uploader may choose to ignore ``raw`` artifacts, whereas a network filer archiver may choose to archive them). .. note:: The ``kind`` parameter is intended to represent the logical function of a particular artifact, not its intended means of processing -- this is left entirely up to the output processors. """ _pod_serialization_version = 2 @staticmethod def from_pod(pod): pod = Artifact._upgrade_pod(pod) pod_version = pod.pop('_pod_version') pod['kind'] = ArtifactType(pod['kind']) instance = Artifact(**pod) instance._pod_version = pod_version # pylint: disable =protected-access instance.is_dir = pod.pop('is_dir') return instance def __init__(self, name, path, kind, description=None, classifiers=None, is_dir=False): """ :param name: Name that uniquely identifies this artifact. :param path: The *relative* path of the artifact. Depending on the ``level``, this must be relative to either the run or the iteration output directory. Note: this path *must* be delimited using ``/`` irrespective of the operating system. :param kind: The type of the artifact this is (e.g. log file, result, etc.); this will be used as a hint to output processors. This must be one of ``'log'``, ``'meta'``, ``'data'``, ``'export'``, ``'raw'``. :param description: A free-form description of what this artifact is. :param classifiers: A set of key-value pairs to further classify this artifact beyond the current iteration (e.g. this can be used to identify sub-tests).
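A construction sketch (hypothetical values, not taken from a real run)::

    artifact = Artifact('trace', 'trace.dat', 'data',
                        description='binary trace collected for this job')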
""" super(Artifact, self).__init__() self.name = name self.path = path.replace('/', os.sep) if path is not None else path try: self.kind = ArtifactType(kind) except ValueError: msg = 'Invalid Artifact kind: {}; must be in {}' raise ValueError(msg.format(kind, ARTIFACT_TYPES)) self.description = description self.classifiers = classifiers or {} self.is_dir = is_dir def to_pod(self): pod = super(Artifact, self).to_pod() pod.update(self.__dict__) pod['kind'] = str(self.kind) pod['is_dir'] = self.is_dir return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) return pod @staticmethod def _pod_upgrade_v2(pod): pod['is_dir'] = pod.get('is_dir', False) return pod def __str__(self): return self.path def __repr__(self): ft = 'dir' if self.is_dir else 'file' return '{} ({}) ({}): {}'.format(self.name, ft, self.kind, self.path) class Metric(Podable): """ This is a single metric collected from executing a workload. :param name: the name of the metric. Uniquely identifies the metric within the results. :param value: The numerical value of the metric for this execution of a workload. This can be either an int or a float. :param units: Units for the collected value. Can be None if the value has no units (e.g. it's a count or a standardised score). :param lower_is_better: Boolean flag indicating where lower values are better than higher ones. Defaults to False. :param classifiers: A set of key-value pairs to further classify this metric beyond current iteration (e.g. this can be used to identify sub-tests). """ __slots__ = ['name', 'value', 'units', 'lower_is_better', 'classifiers'] _pod_serialization_version = 1 @staticmethod def from_pod(pod): pod = Metric._upgrade_pod(pod) pod_version = pod.pop('_pod_version') instance = Metric(**pod) instance._pod_version = pod_version # pylint: disable =protected-access return instance @property def label(self): parts = ['{}={}'.format(n, v) for n, v in self.classifiers.items()] parts.insert(0, self.name) return '/'.join(parts) def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None): super(Metric, self).__init__() self.name = name self.value = numeric(value) self.units = units self.lower_is_better = lower_is_better self.classifiers = classifiers or {} def to_pod(self): pod = super(Metric, self).to_pod() pod['name'] = self.name pod['value'] = self.value pod['units'] = self.units pod['lower_is_better'] = self.lower_is_better pod['classifiers'] = self.classifiers return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) return pod def __str__(self): result = '{}: {}'.format(self.name, self.value) if self.units: result += ' ' + self.units result += ' ({})'.format('-' if self.lower_is_better else '+') return result def __repr__(self): text = self.__str__() if self.classifiers: return '<{} {}>'.format(text, format_ordered_dict(self.classifiers)) else: return '<{}>'.format(text) class Event(Podable): """ An event that occured during a run. 
""" __slots__ = ['timestamp', 'message'] _pod_serialization_version = 1 @staticmethod def from_pod(pod): pod = Event._upgrade_pod(pod) pod_version = pod.pop('_pod_version') instance = Event(pod['message']) instance.timestamp = pod['timestamp'] instance._pod_version = pod_version # pylint: disable =protected-access return instance @property def summary(self): lines = self.message.split('\n') result = lines[0] if len(lines) > 1: result += '[...]' return result def __init__(self, message): super(Event, self).__init__() self.timestamp = datetime.utcnow() self.message = str(message) def to_pod(self): pod = super(Event, self).to_pod() pod['timestamp'] = self.timestamp pod['message'] = self.message return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) return pod def __str__(self): return '[{}] {}'.format(self.timestamp, self.message) __repr__ = __str__ def init_run_output(path, wa_state, force=False): if os.path.exists(path): if force: logger.info('Removing existing output directory.') shutil.rmtree(os.path.abspath(path)) else: raise RuntimeError('path exists: {}'.format(path)) logger.info('Creating output directory.') os.makedirs(path) meta_dir = os.path.join(path, '__meta') os.makedirs(meta_dir) _save_raw_config(meta_dir, wa_state) touch(os.path.join(path, 'run.log')) info = RunInfo( run_name=wa_state.run_config.run_name, project=wa_state.run_config.project, project_stage=wa_state.run_config.project_stage, ) write_pod(info.to_pod(), os.path.join(meta_dir, 'run_info.json')) write_pod(RunState().to_pod(), os.path.join(path, '.run_state.json')) write_pod(Result().to_pod(), os.path.join(path, 'result.json')) ro = RunOutput(path) ro.update_metadata('versions', 'wa', get_wa_version_with_commit()) ro.update_metadata('versions', 'devlib', devlib.__full_version__) return ro def init_job_output(run_output, job): output_name = '{}-{}-{}'.format(job.id, job.spec.label, job.iteration) path = os.path.join(run_output.basepath, output_name) ensure_directory_exists(path) write_pod(Result().to_pod(), os.path.join(path, 'result.json')) job_output = JobOutput(path, job.id, job.label, job.iteration, job.retries) job_output.spec = job.spec job_output.status = job.status run_output.jobs.append(job_output) return job_output def discover_wa_outputs(path): # Use topdown=True to allow pruning dirs for root, dirs, _ in os.walk(path, topdown=True): if '__meta' in dirs: yield RunOutput(root) # Avoid recursing into the artifact as it can be very lengthy if a # large number of file is present (sysfs dump) dirs.clear() def _save_raw_config(meta_dir, state): raw_config_dir = os.path.join(meta_dir, 'raw_config') os.makedirs(raw_config_dir) for i, source in enumerate(state.loaded_config_sources): if not os.path.isfile(source): continue basename = os.path.basename(source) dest_path = os.path.join(raw_config_dir, 'cfg{}-{}'.format(i, basename)) shutil.copy(source, dest_path) class DatabaseOutput(Output): kind = None @property def resultfile(self): if self.conn is None or self.oid is None: return {} pod = self._get_pod_version() pod['metrics'] = self._get_metrics() pod['status'] = self._get_status() pod['classifiers'] = self._get_classifiers(self.oid, 'run') pod['events'] = self._get_events() pod['artifacts'] = self._get_artifacts() return pod @staticmethod def _build_command(columns, tables, conditions=None, joins=None): cmd = '''SELECT\n\t{}\nFROM\n\t{}'''.format(',\n\t'.join(columns), ',\n\t'.join(tables)) if joins: for join in joins: cmd += '''\nLEFT JOIN {} ON {}'''.format(join[0], 
join[1]) if conditions: cmd += '''\nWHERE\n\t{}'''.format('\nAND\n\t'.join(conditions)) return cmd + ';' def __init__(self, conn, oid=None, reload=True): # pylint: disable=super-init-not-called self.conn = conn self.oid = oid self.result = None if reload: self.reload() def __repr__(self): return '<{} {}>'.format(self.__class__.__name__, self.oid) def __str__(self): return self.oid def reload(self): try: self.result = Result.from_pod(self.resultfile) except Exception as e: # pylint: disable=broad-except self.result = Result() self.result.status = Status.UNKNOWN self.add_event(str(e)) def get_artifact_path(self, name): artifact = self.get_artifact(name) if artifact.is_dir: return self._read_dir_artifact(artifact) else: return self._read_file_artifact(artifact) def _read_dir_artifact(self, artifact): artifact_path = tempfile.mkdtemp(prefix='wa_') with tarfile.open(fileobj=self.conn.lobject(int(artifact.path), mode='b'), mode='r|gz') as tar_file: safe_extract(tar_file, artifact_path) self.conn.commit() return artifact_path def _read_file_artifact(self, artifact): artifact = StringIO(self.conn.lobject(int(artifact.path)).read()) self.conn.commit() return artifact # pylint: disable=too-many-locals def _read_db(self, columns, tables, conditions=None, join=None, as_dict=True): # Automatically strip the table name from a column when using column names as keys, or # allow a column to be given as a (db_column_name, alias) tuple so that the alias is # used as the key when retrieving the data db_columns = [] aliases_columns = [] for column in columns: if isinstance(column, tuple): db_columns.append(column[0]) aliases_columns.append(column[1]) else: db_columns.append(column) aliases_columns.append(column.rsplit('.', 1)[-1]) cmd = self._build_command(db_columns, tables, conditions, join) logger.debug(cmd) with self.conn.cursor() as cursor: cursor.execute(cmd) results = cursor.fetchall() self.conn.commit() if not as_dict: return results # Format the output dict using column names as keys output = [] for result in results: entry = {} for k, v in zip(aliases_columns, result): entry[k] = v output.append(entry) return output def _get_pod_version(self): columns = ['_pod_version', '_pod_serialization_version'] tables = ['{}s'.format(self.kind)] conditions = ['{}s.oid = \'{}\''.format(self.kind, self.oid)] results = self._read_db(columns, tables, conditions) if results: return results[0] else: return None def _populate_classifiers(self, pod, kind): for entry in pod: oid = entry.pop('oid') entry['classifiers'] = self._get_classifiers(oid, kind) return pod def _get_classifiers(self, oid, kind): columns = ['classifiers.key', 'classifiers.value'] tables = ['classifiers'] conditions = ['{}_oid = \'{}\''.format(kind, oid)] results = self._read_db(columns, tables, conditions, as_dict=False) classifiers = {} for (k, v) in results: classifiers[k] = v return classifiers def _get_metrics(self): columns = ['metrics.name', 'metrics.value', 'metrics.units', 'metrics.lower_is_better', 'metrics.oid', 'metrics._pod_version', 'metrics._pod_serialization_version'] tables = ['metrics'] joins = [('classifiers', 'classifiers.metric_oid = metrics.oid')] conditions = ['metrics.{}_oid = \'{}\''.format(self.kind, self.oid)] pod = self._read_db(columns, tables, conditions, joins) return self._populate_classifiers(pod, 'metric') def _get_status(self): columns = ['{}s.status'.format(self.kind)] tables = ['{}s'.format(self.kind)] conditions = ['{}s.oid = \'{}\''.format(self.kind, self.oid)] results = self._read_db(columns, tables, conditions, as_dict=False) if results: return
results[0][0] else: return None def _get_artifacts(self): columns = ['artifacts.name', 'artifacts.description', 'artifacts.kind', ('largeobjects.lo_oid', 'path'), 'artifacts.oid', 'artifacts.is_dir', 'artifacts._pod_version', 'artifacts._pod_serialization_version'] tables = ['largeobjects', 'artifacts'] joins = [('classifiers', 'classifiers.artifact_oid = artifacts.oid')] conditions = ['artifacts.{}_oid = \'{}\''.format(self.kind, self.oid), 'artifacts.large_object_uuid = largeobjects.oid'] # If retrieving run-level artifacts, we want those that don't also belong to a job if self.kind == 'run': conditions.append('artifacts.job_oid IS NULL') pod = self._read_db(columns, tables, conditions, joins) for artifact in pod: artifact['path'] = str(artifact['path']) return self._populate_classifiers(pod, 'artifact') def _get_events(self): columns = ['events.message', 'events.timestamp'] tables = ['events'] conditions = ['events.{}_oid = \'{}\''.format(self.kind, self.oid)] return self._read_db(columns, tables, conditions) def kernel_config_from_db(raw): kernel_config = {} if raw: for k, v in zip(raw[0], raw[1]): kernel_config[k] = v return kernel_config class RunDatabaseOutput(DatabaseOutput, RunOutputCommon): kind = 'run' @property def basepath(self): return 'db:({})-{}@{}:{}'.format(self.dbname, self.user, self.host, self.port) @property def augmentations(self): columns = ['augmentations.name'] tables = ['augmentations'] conditions = ['augmentations.run_oid = \'{}\''.format(self.oid)] results = self._read_db(columns, tables, conditions, as_dict=False) return [a for augs in results for a in augs] @property def _db_infofile(self): columns = ['start_time', 'project', ('run_uuid', 'uuid'), 'end_time', 'run_name', 'duration', '_pod_version', '_pod_serialization_version'] tables = ['runs'] conditions = ['runs.run_uuid = \'{}\''.format(self.run_uuid)] pod = self._read_db(columns, tables, conditions) if not pod: return {} return pod[0] @property def _db_targetfile(self): columns = ['os', 'is_rooted', 'target', 'modules', 'abi', 'cpus', 'os_version', 'hostid', 'hostname', 'kernel_version', 'kernel_release', 'kernel_sha1', 'kernel_config', 'sched_features', 'page_size_kb', 'system_id', 'screen_resolution', 'prop', 'android_id', '_pod_version', '_pod_serialization_version'] tables = ['targets'] conditions = ['targets.run_oid = \'{}\''.format(self.oid)] pod = self._read_db(columns, tables, conditions) if not pod: return {} pod = pod[0] try: pod['cpus'] = [json.loads(cpu) for cpu in pod.pop('cpus')] except SerializerSyntaxError: pod['cpus'] = [] logger.debug('Failed to deserialize target cpu information') pod['kernel_config'] = kernel_config_from_db(pod['kernel_config']) return pod @property def _db_statefile(self): # Read overall run information columns = ['runs.state'] tables = ['runs'] conditions = ['runs.run_uuid = \'{}\''.format(self.run_uuid)] pod = self._read_db(columns, tables, conditions) pod = pod[0].get('state') if not pod: return {} # Read job information columns = ['jobs.job_id', 'jobs.oid'] tables = ['jobs'] conditions = ['jobs.run_oid = \'{}\''.format(self.oid)] job_oids = self._read_db(columns, tables, conditions) # Match job oid with jobs from state file for job in pod.get('jobs', []): for job_oid in job_oids: if job['id'] == job_oid['job_id']: job['oid'] = job_oid['oid'] break return pod @property def _db_jobsfile(self): workload_params = self._get_parameters('workload') runtime_params = self._get_parameters('runtime') columns = [('jobs.job_id', 'id'), 'jobs.label', 'jobs.workload_name',
'jobs.oid', 'jobs._pod_version', 'jobs._pod_serialization_version'] tables = ['jobs'] conditions = ['jobs.run_oid = \'{}\''.format(self.oid)] jobs = self._read_db(columns, tables, conditions) for job in jobs: job['augmentations'] = self._get_job_augmentations(job['oid']) job['workload_parameters'] = workload_params.pop(job['oid'], {}) job['runtime_parameters'] = runtime_params.pop(job['oid'], {}) job.pop('oid') return jobs @property def _db_run_config(self): pod = defaultdict(dict) parameter_types = ['augmentation', 'resource_getter'] for parameter_type in parameter_types: columns = ['parameters.name', 'parameters.value', 'parameters.value_type', ('{}s.name'.format(parameter_type), '{}'.format(parameter_type))] tables = ['parameters', '{}s'.format(parameter_type)] conditions = ['parameters.run_oid = \'{}\''.format(self.oid), 'parameters.type = \'{}\''.format(parameter_type), 'parameters.{0}_oid = {0}s.oid'.format(parameter_type)] configs = self._read_db(columns, tables, conditions) for config in configs: entry = {config['name']: json.loads(config['value'])} pod['{}s'.format(parameter_type)][config.pop(parameter_type)] = entry # run config columns = ['runs.max_retries', 'runs.allow_phone_home', 'runs.bail_on_init_failure', 'runs.retry_on_status'] tables = ['runs'] conditions = ['runs.oid = \'{}\''.format(self.oid)] config = self._read_db(columns, tables, conditions) if not config: return {} config = config[0] # Convert back into a string representation of an enum list config['retry_on_status'] = config['retry_on_status'][1:-1].split(',') pod.update(config) return pod def __init__(self, password=None, dbname='wa', host='localhost', port='5432', user='postgres', run_uuid=None, list_runs=False): if psycopg2 is None: msg = 'Please install psycopg2 in order to connect to postgres databases' raise HostError(msg) self.dbname = dbname self.host = host self.port = port self.user = user self.password = password self.run_uuid = run_uuid self.conn = None self.info = None self.state = None self.result = None self.target_info = None self._combined_config = None self.jobs = [] self.job_specs = [] self.connect() super(RunDatabaseOutput, self).__init__(conn=self.conn, reload=False) local_schema_version, db_schema_version = get_schema_versions(self.conn) if local_schema_version != db_schema_version: self.disconnect() msg = 'The current database schema is v{}, however the local ' \ 'schema version is v{}. Please update your database ' \ 'with the create command' raise HostError(msg.format(db_schema_version, local_schema_version)) if list_runs: print('Available runs are:') self._list_runs() self.disconnect() return if not self.run_uuid: print('Please specify a run_uuid') self._list_runs() self.disconnect() return if not self.oid: self.oid = self._get_oid() self.reload() def read_job_specs(self): job_specs = [] for job in self._db_jobsfile: job_specs.append(JobSpec.from_pod(job)) return job_specs def connect(self): if self.conn and not self.conn.closed: return try: self.conn = psycopg2.connect(dbname=self.dbname, user=self.user, host=self.host, password=self.password, port=self.port) except Psycopg2Error as e: raise HostError('Unable to connect to the database: "{}"'.format(e.args[0])) def disconnect(self): self.conn.commit() self.conn.close() def reload(self): super(RunDatabaseOutput, self).reload() info_pod = self._db_infofile state_pod = self._db_statefile if not info_pod or not state_pod: msg = '"{}" does not appear to be a valid WA Database Output.'
raise ValueError(msg.format(self.oid)) self.info = RunInfo.from_pod(info_pod) self.state = RunState.from_pod(state_pod) self._combined_config = CombinedConfig.from_pod({'run_config': self._db_run_config}) self.target_info = TargetInfo.from_pod(self._db_targetfile) self.job_specs = self.read_job_specs() for job_state in self._db_statefile['jobs']: job = JobDatabaseOutput(self.conn, job_state.get('oid'), job_state['id'], job_state['label'], job_state['iteration'], job_state['retries']) job.status = job_state['status'] job.spec = self.get_job_spec(job.id) if job.spec is None: logger.warning('Could not find spec for job {}'.format(job.id)) self.jobs.append(job) def _get_oid(self): columns = ['{}s.oid'.format(self.kind)] tables = ['{}s'.format(self.kind)] conditions = ['runs.run_uuid = \'{}\''.format(self.run_uuid)] oid = self._read_db(columns, tables, conditions, as_dict=False) if not oid: raise ConfigError('No matching run entries found for run_uuid {}'.format(self.run_uuid)) if len(oid) > 1: raise ConfigError('Multiple entries found for run_uuid: {}'.format(self.run_uuid)) return oid[0][0] def _get_parameters(self, param_type): columns = ['parameters.job_oid', 'parameters.name', 'parameters.value'] tables = ['parameters'] conditions = ['parameters.type = \'{}\''.format(param_type), 'parameters.run_oid = \'{}\''.format(self.oid)] params = self._read_db(columns, tables, conditions, as_dict=False) parm_dict = defaultdict(dict) for (job_oid, k, v) in params: try: parm_dict[job_oid][k] = json.loads(v) except SerializerSyntaxError: logger.debug('Failed to deserialize job_oid:{}-"{}":"{}"'.format(job_oid, k, v)) return parm_dict def _get_job_augmentations(self, job_oid): columns = ['jobs_augs.augmentation_oid', 'augmentations.name', 'augmentations.oid', 'jobs_augs.job_oid'] tables = ['jobs_augs', 'augmentations'] conditions = ['jobs_augs.job_oid = \'{}\''.format(job_oid), 'jobs_augs.augmentation_oid = augmentations.oid'] augmentations = self._read_db(columns, tables, conditions) return [aug['name'] for aug in augmentations] def _list_runs(self): columns = ['runs.run_uuid', 'runs.run_name', 'runs.project', 'runs.project_stage', 'runs.status', 'runs.start_time', 'runs.end_time'] tables = ['runs'] pod = self._read_db(columns, tables) if pod: headers = ['Run Name', 'Project', 'Project Stage', 'Start Time', 'End Time', 'run_uuid'] run_list = [] for entry in pod: # Format times to display better start_time = entry['start_time'] end_time = entry['end_time'] if start_time: start_time = start_time.strftime("%Y-%m-%d %H:%M:%S") if end_time: end_time = end_time.strftime("%Y-%m-%d %H:%M:%S") run_list.append([ entry['run_name'], entry['project'], entry['project_stage'], start_time, end_time, entry['run_uuid']]) print(format_simple_table(run_list, headers)) else: print('No Runs Found') class JobDatabaseOutput(DatabaseOutput): kind = 'job' def __init__(self, conn, oid, job_id, label, iteration, retry): super(JobDatabaseOutput, self).__init__(conn, oid=oid) self.id = job_id self.label = label self.iteration = iteration self.retry = retry self.result = None self.spec = None self.reload() def __repr__(self): return '<{} {}-{}-{}>'.format(self.__class__.__name__, self.id, self.label, self.iteration) def __str__(self): return '{}-{}-{}'.format(self.id, self.label, self.iteration) @property def augmentations(self): job_augs = set([]) if self.spec: for aug in self.spec.augmentations: job_augs.add(aug) return list(job_augs) ================================================ FILE: wa/framework/output_processor.py 
================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging from wa.framework import pluginloader from wa.framework.exception import ConfigError from wa.framework.instrument import is_installed from wa.framework.plugin import Plugin from wa.utils.log import log_error, indentcontext from wa.utils.misc import isiterable from wa.utils.types import identifier class OutputProcessor(Plugin): kind = 'output_processor' requires = [] def __init__(self, **kwargs): super(OutputProcessor, self).__init__(**kwargs) self.is_enabled = True def validate(self): super(OutputProcessor, self).validate() for instrument in self.requires: if not is_installed(instrument): msg = 'Instrument "{}" is required by {}, but is not installed.' raise ConfigError(msg.format(instrument, self.name)) def initialize(self, context): pass def finalize(self, context): pass class ProcessorManager(object): def __init__(self, loader=pluginloader): self.loader = loader self.logger = logging.getLogger('processor') self.processors = [] def install(self, processor, context): if not isinstance(processor, OutputProcessor): processor = self.loader.get_output_processor(processor) self.logger.debug('Installing {}'.format(processor.name)) processor.logger.context = context self.processors.append(processor) context.add_augmentation(processor) def disable_all(self): for output_processor in self.processors: self._disable_output_processor(output_processor) def enable_all(self): for output_processor in self.processors: self._enable_output_processor(output_processor) def enable(self, to_enable): if isiterable(to_enable): for inst in to_enable: self._enable_output_processor(inst) else: self._enable_output_processor(to_enable) def disable(self, to_disable): if isiterable(to_disable): for inst in to_disable: self._disable_output_processor(inst) else: self._disable_output_processor(to_disable) def get_output_processor(self, processor): if isinstance(processor, OutputProcessor): return processor processor = identifier(processor) for p in self.processors: if processor == p.name: return p raise ValueError('Output processor {} is not installed'.format(processor)) def get_enabled(self): return [p for p in self.processors if p.is_enabled] def get_disabled(self): return [p for p in self.processors if not p.is_enabled] def validate(self): for proc in self.processors: proc.validate() def initialize(self, context): for proc in self.processors: proc.initialize(context) def finalize(self, context): for proc in self.processors: proc.finalize(context) def process_job_output(self, context): self.do_for_each_proc('process_job_output', 'Processing using "{}"', context.job_output, context.target_info, context.run_output) def export_job_output(self, context): self.do_for_each_proc('export_job_output', 'Exporting using "{}"', context.job_output, context.target_info, context.run_output) def process_run_output(self, context): self.do_for_each_proc('process_run_output', 'Processing using "{}"', 
context.run_output, context.target_info) def export_run_output(self, context): self.do_for_each_proc('export_run_output', 'Exporting using "{}"', context.run_output, context.target_info) def do_for_each_proc(self, method_name, message, *args): with indentcontext(): for proc in self.processors: if proc.is_enabled: proc_func = getattr(proc, method_name, None) if proc_func is None: continue try: self.logger.info(message.format(proc.name)) proc_func(*args) except Exception as e: # pylint: disable=broad-except if isinstance(e, KeyboardInterrupt): raise log_error(e, self.logger) def _enable_output_processor(self, inst): inst = self.get_output_processor(inst) self.logger.debug('Enabling output processor {}'.format(inst.name)) if not inst.is_enabled: inst.is_enabled = True def _disable_output_processor(self, inst): inst = self.get_output_processor(inst) self.logger.debug('Disabling output processor {}'.format(inst.name)) if inst.is_enabled: inst.is_enabled = False ================================================ FILE: wa/framework/plugin.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=E1101 import os import sys import inspect import logging from collections import OrderedDict, defaultdict from itertools import chain from copy import copy from future.utils import with_metaclass from wa.framework.configuration.core import settings, ConfigurationPoint as Parameter from wa.framework.exception import (NotFoundError, PluginLoaderError, TargetError, ValidationError, ConfigError, HostError) from wa.utils import log from wa.utils.misc import (ensure_directory_exists as _d, walk_modules, load_class, merge_dicts_simple, get_article, import_path) from wa.utils.types import identifier class AttributeCollection(object): """ Accumulator for plugin attribute objects (such as Parameters or Artifacts). This will replace any class member list accumulating such attributes through the magic of metaprogramming\ [*]_. .. [*] which is totally safe and not going to backfire in any way... """ @property def values(self): return list(self._attrs.values()) def __init__(self, attrcls): self._attrcls = attrcls self._attrs = OrderedDict() def add(self, p): p = self._to_attrcls(p) if p.name in self._attrs: if p.override: newp = copy(self._attrs[p.name]) for a, v in p.__dict__.items(): if v is not None: setattr(newp, a, v) if not hasattr(newp, "_overridden"): # pylint: disable=protected-access newp._overridden = p._owner self._attrs[p.name] = newp else: # Duplicate attribute condition is checked elsewhere.
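# A note on the override branch above (a sketch with a hypothetical
# Parameter name): a subclass declaring
#     Parameter('iterations', override=True, default=10)
# merges its non-None fields over the inherited 'iterations' Parameter
# rather than replacing it wholesale.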
pass else: self._attrs[p.name] = p append = add def __str__(self): return 'AC({})'.format(list(map(str, list(self._attrs.values())))) __repr__ = __str__ def _to_attrcls(self, p): if not isinstance(p, self._attrcls): raise ValueError('Invalid attribute value: {}; must be a {}'.format(p, self._attrcls)) if p.name in self._attrs and not p.override: raise ValueError('Attribute {} has already been defined.'.format(p.name)) return p def __iadd__(self, other): for p in other: self.add(p) return self def __iter__(self): return iter(self.values) def __contains__(self, p): return p in self._attrs def __getitem__(self, i): return self._attrs[i] def __len__(self): return len(self._attrs) class AliasCollection(AttributeCollection): def __init__(self): super(AliasCollection, self).__init__(Alias) def _to_attrcls(self, p): if isinstance(p, (list, tuple)): # must be in the form (name, {param: value, ...}) # pylint: disable=protected-access p = self._attrcls(p[0], **p[1]) elif not isinstance(p, self._attrcls): raise ValueError('Invalid parameter value: {}'.format(p)) if p.name in self._attrs: raise ValueError('Attribute {} has already been defined.'.format(p.name)) return p class ListCollection(list): def __init__(self, attrcls): # pylint: disable=unused-argument super(ListCollection, self).__init__() class Alias(object): """ This represents a configuration alias for a plugin, mapping an alternative name to a set of parameter values, effectively providing an alternative set of default values. """ def __init__(self, name, **kwargs): self.name = name self.params = kwargs self.plugin_name = None # gets set by the MetaClass def validate(self, ext): ext_params = set(p.name for p in ext.parameters) for param in self.params: if param not in ext_params: # Raising config error because aliases might have come through # the config. msg = 'Parameter {} (defined in alias {}) is invalid for {}' raise ConfigError(msg.format(param, self.name, ext.name)) # pylint: disable=bad-mcs-classmethod-argument class PluginMeta(type): """ This basically adds some magic to plugins to make implementing new plugins, such as workloads, less complicated. It ensures that certain class attributes (specified by the ``to_propagate`` attribute of the metaclass) get propagated down the inheritance hierarchy. The assumption is that the values of the attributes specified in the class are iterable; if that is not met, Bad Things (tm) will happen. """ to_propagate = [ ('parameters', Parameter, AttributeCollection), ] def __new__(mcs, clsname, bases, attrs): mcs._propagate_attributes(bases, attrs, clsname) cls = type.__new__(mcs, clsname, bases, attrs) mcs._setup_aliases(cls) return cls @classmethod def _propagate_attributes(mcs, bases, attrs, clsname): # pylint: disable=too-many-locals # pylint: disable=protected-access """ For attributes specified by to_propagate, their values will be a union of those specified for cls and its bases (cls values overriding those of bases in case of conflicts).
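E.g. (an illustrative sketch, not a test from this repo)::

    class Base(Plugin):
        parameters = [Parameter('alpha')]

    class Child(Base):
        parameters = [Parameter('beta')]
        # Child.parameters now contains both 'alpha' and 'beta'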
""" for prop_attr, attr_cls, attr_collector_cls in mcs.to_propagate: should_propagate = False propagated = attr_collector_cls(attr_cls) for base in bases: if hasattr(base, prop_attr): propagated += getattr(base, prop_attr) or [] should_propagate = True if prop_attr in attrs: pattrs = attrs[prop_attr] or [] for pa in pattrs: if not isinstance(pa, attr_cls): msg = 'Invalid value "{}" for attribute "{}"; must be a {}' raise ValueError(msg.format(pa, prop_attr, attr_cls)) pa._owner = clsname propagated += pattrs should_propagate = True if should_propagate: for p in propagated: override = bool(getattr(p, "override", None)) overridden = bool(getattr(p, "_overridden", None)) if override != overridden: msg = "Overriding non existing parameter '{}' inside '{}'" raise ValueError(msg.format(p.name, p._owner)) attrs[prop_attr] = propagated @classmethod def _setup_aliases(mcs, cls): if hasattr(cls, 'aliases'): aliases, cls.aliases = cls.aliases, AliasCollection() for alias in aliases: if isinstance(alias, str): alias = Alias(alias) alias.validate(cls) alias.plugin_name = cls.name cls.aliases.add(alias) class Plugin(with_metaclass(PluginMeta, object)): """ Base class for all WA plugins. An plugin is basically a plug-in. It extends the functionality of WA in some way. Plugins are discovered and loaded dynamically by the plugin loader upon invocation of WA scripts. Adding an plugin is a matter of placing a class that implements an appropriate interface somewhere it would be discovered by the loader. That "somewhere" is typically one of the plugin subdirectories under ``~/.workload_automation/``. """ kind = None name = None parameters = [] artifacts = [] aliases = [] core_modules = [] @classmethod def get_default_config(cls): return {p.name: p.default for p in cls.parameters if not p.deprecated} @property def dependencies_directory(self): return _d(os.path.join(settings.dependencies_directory, self.name)) @property def _classname(self): return self.__class__.__name__ def __init__(self, **kwargs): self.logger = logging.getLogger(self.name) self._modules = [] self.capabilities = getattr(self.__class__, 'capabilities', []) for param in self.parameters: param.set_value(self, kwargs.get(param.name)) for key in kwargs: if key not in self.parameters: message = 'Unexpected parameter "{}" for {}' raise ConfigError(message.format(key, self.name)) def get_config(self): """ Returns current configuration (i.e. parameter values) of this plugin. """ config = {} for param in self.parameters: config[param.name] = getattr(self, param.name, None) return config def validate(self): """ Perform basic validation to ensure that this plugin is capable of running. This is intended as an early check to ensure the plugin has not been mis-configured, rather than a comprehensive check (that may, e.g., require access to the execution context). This method may also be used to enforce (i.e. set as well as check) inter-parameter constraints for the plugin (e.g. if valid values for parameter A depend on the value of parameter B -- something that is not possible to enfroce using ``Parameter``\ 's ``constraint`` attribute. 
""" if self.name is None: raise ValidationError('Name not set for {}'.format(self._classname)) for param in self.parameters: param.validate(self) def __getattr__(self, name): if name == '_modules': raise ValueError('_modules accessed too early!') for module in self._modules: if hasattr(module, name): return getattr(module, name) raise AttributeError(name) def load_modules(self, loader): """ Load the modules specified by the "modules" Parameter using the provided loader. A loader can be any object that has an atribute called "get_module" that implements the following signature:: get_module(name, owner, **kwargs) and returns an instance of :class:`wa.core.plugin.Module`. If the module with the specified name is not found, the loader must raise an appropriate exception. """ modules = list(reversed(self.core_modules)) modules += list(reversed(self.modules or [])) if not modules: return for module_spec in modules: if not module_spec: continue module = self._load_module(loader, module_spec) self._install_module(module) def has(self, capability): """ Check if this plugin has the specified capability. The alternative method ``can`` is identical to this. Which to use is up to the caller depending on what makes semantic sense in the context of the capability, e.g. ``can('hard_reset')`` vs ``has('active_cooling')``. """ return capability in self.capabilities can = has def _load_module(self, loader, module_spec): if isinstance(module_spec, str): name = module_spec params = {} elif isinstance(module_spec, dict): if len(module_spec) != 1: msg = 'Invalid module spec: {}; dict must have exctly one key -- '\ 'the module name.' raise ValueError(msg.format(module_spec)) name, params = list(module_spec.items())[0] else: message = 'Invalid module spec: {}; must be a string or a one-key dict.' raise ValueError(message.format(module_spec)) if not isinstance(params, dict): message = 'Invalid module spec: {}; dict value must also be a dict.' raise ValueError(message.format(module_spec)) module = loader.get_module(name, owner=self, **params) module.initialize(None) return module def _install_module(self, module): for capability in module.capabilities: if capability not in self.capabilities: self.capabilities.append(capability) self._modules.append(module) def __str__(self): return str(self.name) def __repr__(self): params = [] for param in self.parameters: params.append('{}={}'.format(param.name, getattr(self, param.name, None))) return '{}({})'.format(self.name, ', '.join(params)) class TargetedPlugin(Plugin): """ A plugin that interacts with a target device. """ supported_targets = [] parameters = [ Parameter('cleanup_assets', kind=bool, global_alias='cleanup_assets', aliases=['clean_up'], default=True, description=""" If ``True``, assets that are deployed or created by the plugin will be removed again from the device. """), ] @classmethod def check_compatible(cls, target): if cls.supported_targets: if target.os not in cls.supported_targets: msg = 'Incompatible target OS "{}" for {}' raise TargetError(msg.format(target.os, cls.name)) def __init__(self, target, **kwargs): super(TargetedPlugin, self).__init__(**kwargs) self.check_compatible(target) self.target = target class PluginLoaderItem(object): def __init__(self, ext_tuple): self.name = ext_tuple.name self.default_package = ext_tuple.default_package self.default_path = ext_tuple.default_path self.cls = load_class(ext_tuple.cls) class PluginLoader(object): """ Discovers, enumerates and loads available devices, configs, etc. 
The loader will attempt to discover things on construction by looking in a predetermined set of locations defined by default_paths. Optionally, additional locations may be specified through the ``paths`` parameter, which must be a list of additional Python module paths (i.e. dot-delimited). """ def __init__(self, packages=None, paths=None, ignore_paths=None, keep_going=False): """ params:: :packages: List of packages to load plugins from. :paths: List of paths to be searched for Python modules containing WA plugins. :ignore_paths: List of paths to ignore when searching for WA plugins (these would typically be subdirectories of one or more locations listed in the ``paths`` parameter). :keep_going: Specifies whether to keep going if an error occurs while loading plugins. """ self.logger = logging.getLogger('pluginloader') self.keep_going = keep_going self.packages = packages or [] self.paths = paths or [] self.ignore_paths = ignore_paths or [] self.plugins = {} self.kind_map = defaultdict(dict) self.aliases = {} self.global_param_aliases = {} self._discover_from_packages(self.packages) self._discover_from_paths(self.paths, self.ignore_paths) def update(self, packages=None, paths=None, ignore_paths=None): """ Load plugins from the specified paths/packages without clearing or reloading existing plugin. """ msg = 'Updating from: packages={} paths={}' self.logger.debug(msg.format(packages, paths)) if packages: self.packages.extend(packages) self._discover_from_packages(packages) if paths: self.paths.extend(paths) self.ignore_paths.extend(ignore_paths or []) self._discover_from_paths(paths, ignore_paths or []) def clear(self): """ Clear all discovered items. """ self.plugins = {} self.kind_map.clear() self.aliases.clear() self.global_param_aliases.clear() def reload(self): """ Clear all discovered items and re-run the discovery. """ self.logger.debug('Reloading') self.clear() self._discover_from_packages(self.packages) self._discover_from_paths(self.paths, self.ignore_paths) def get_plugin_class(self, name, kind=None): """ Return the class for the specified plugin if found, or raise ``NotFoundError``. """ name, _ = self.resolve_alias(name) if kind is None: try: return self.plugins[name] except KeyError: raise NotFoundError('plugin {} not found.'.format(name)) if kind not in self.kind_map: raise ValueError('Unknown plugin type: {}'.format(kind)) store = self.kind_map[kind] if name not in store: msg = 'plugin {} is not {} {}.' raise NotFoundError(msg.format(name, get_article(kind), kind)) return store[name] def get_plugin(self, name=None, kind=None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg """ Return plugin of the specified kind with the specified name. Any additional parameters will be passed to the plugin's __init__. """ name, base_kwargs = self.resolve_alias(name) kwargs = OrderedDict(chain(iter(base_kwargs.items()), iter(kwargs.items()))) cls = self.get_plugin_class(name, kind) plugin = cls(*args, **kwargs) return plugin def get_default_config(self, name): """ Returns the default configuration for the specified plugin name. The name may be an alias, in which case, the returned config will be augmented with appropriate alias overrides. """ real_name, alias_config = self.resolve_alias(name) base_default_config = self.get_plugin_class(real_name).get_default_config() return merge_dicts_simple(base_default_config, alias_config) def list_plugins(self, kind=None): """ List discovered plugin classes. Optionally, only list plugins of a particular type.
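For example (a usage sketch; assumes discovery has already run on
this loader)::

    for cls in loader.list_plugins(kind='workload'):
        print(cls.name)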
""" if kind is None: return list(self.plugins.values()) if kind not in self.kind_map: raise ValueError('Unknown plugin type: {}'.format(kind)) return list(self.kind_map[kind].values()) def has_plugin(self, name, kind=None): """ Returns ``True`` if an plugins with the specified ``name`` has been discovered by the loader. If ``kind`` was specified, only returns ``True`` if the plugin has been found, *and* it is of the specified kind. """ try: self.get_plugin_class(name, kind) return True except NotFoundError: return False def resolve_alias(self, alias_name): """ Try to resolve the specified name as an plugin alias. Returns a two-tuple, the first value of which is actual plugin name, and the iisecond is a dict of parameter values for this alias. If the name passed is already an plugin name, then the result is ``(alias_name, {})``. """ alias_name = identifier(alias_name.lower()) if alias_name in self.plugins: return (alias_name, {}) if alias_name in self.aliases: alias = self.aliases[alias_name] return (alias.plugin_name, copy(alias.params)) raise NotFoundError('Could not find plugin or alias "{}"'.format(alias_name)) # Internal methods. def __getattr__(self, name): """ This resolves methods for specific plugins types based on corresponding generic plugin methods. So it's possible to say things like :: loader.get_device('foo') instead of :: loader.get_plugin('foo', kind='device') """ error_msg = 'No plugins of type "{}" discovered' if name.startswith('get_'): name = name.replace('get_', '', 1) if name in self.kind_map: def __wrapper(pname, *args, **kwargs): return self.get_plugin(pname, name, *args, **kwargs) return __wrapper raise NotFoundError(error_msg.format(name)) if name.startswith('list_'): name = name.replace('list_', '', 1).rstrip('s') if name in self.kind_map: def __wrapper(*args, **kwargs): # pylint: disable=E0102 return self.list_plugins(name, *args, **kwargs) return __wrapper raise NotFoundError(error_msg.format(name)) if name.startswith('has_'): name = name.replace('has_', '', 1) if name in self.kind_map: def __wrapper(pname, *args, **kwargs): # pylint: disable=E0102 return self.has_plugin(pname, name, *args, **kwargs) return __wrapper raise NotFoundError(error_msg.format(name)) raise AttributeError(name) def _discover_from_packages(self, packages): self.logger.debug('Discovering plugins in packages') try: for package in packages: for module in walk_modules(package): self._discover_in_module(module) except HostError as e: message = 'Problem loading plugins from {}: {}' raise PluginLoaderError(message.format(e.module, str(e.orig_exc)), e.exc_info) def _discover_from_paths(self, paths, ignore_paths): paths = paths or [] ignore_paths = ignore_paths or [] self.logger.debug('Discovering plugins in paths') for path in paths: self.logger.debug('Checking path %s', path) if os.path.isfile(path): self._discover_from_file(path) elif os.path.exists(path): for root, _, files in os.walk(path, followlinks=True): should_skip = False for igpath in ignore_paths: if root.startswith(igpath): should_skip = True break if should_skip: continue for fname in files: if os.path.splitext(fname)[1].lower() != '.py': continue filepath = os.path.join(root, fname) self._discover_from_file(filepath) elif not os.path.isabs(path): try: for module in walk_modules(path): self._discover_in_module(module) except Exception: # NOQA pylint: disable=broad-except pass def _discover_from_file(self, filepath): try: module = import_path(filepath) self._discover_in_module(module) except (SystemExit, ImportError) as e: if 
self.keep_going: self.logger.warning('Failed to load {}'.format(filepath)) self.logger.warning('Got: {}'.format(e)) else: msg = 'Failed to load {}' raise PluginLoaderError(msg.format(filepath), sys.exc_info()) except Exception as e: message = 'Problem loading plugins from {}: {}' raise PluginLoaderError(message.format(filepath, e)) def _discover_in_module(self, module): # NOQA pylint: disable=too-many-branches self.logger.debug('Checking module %s', module.__name__) with log.indentcontext(): for obj in vars(module).values(): if inspect.isclass(obj): if not issubclass(obj, Plugin): continue if obj.__module__ != module.__name__: continue if not obj.kind: message = 'Skipping plugin {} as it does not define a kind' self.logger.debug(message.format(obj.__name__)) continue if not obj.name: message = 'Skipping {} {} as it does not define a name' self.logger.debug(message.format(obj.kind, obj.__name__)) continue try: self._add_found_plugin(obj) except PluginLoaderError as e: if self.keep_going: self.logger.warning(e) else: raise e def _add_found_plugin(self, obj): """ :obj: Found plugin class :ext: matching plugin item. """ self.logger.debug('Adding %s %s', obj.kind, obj.name) key = identifier(obj.name.lower()) if key in self.plugins or key in self.aliases: msg = '{} "{}" already exists.' raise PluginLoaderError(msg.format(obj.kind, obj.name)) # plugins are tracked both in a common plugins # dict, and in a per-kind dict (as retrieving # plugins by kind is a common use case). self.plugins[key] = obj self.kind_map[obj.kind][key] = obj for alias in obj.aliases: alias_id = identifier(alias.name.lower()) if alias_id in self.plugins or alias_id in self.aliases: msg = '{} "{}" already exists.' raise PluginLoaderError(msg.format(obj.kind, obj.name)) self.aliases[alias_id] = alias ================================================ FILE: wa/framework/pluginloader.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
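# This module replaces itself in sys.modules with a __LoaderWrapper
# instance (see the bottom of this file), so the underlying PluginLoader
# is constructed lazily on first use. A usage sketch (hypothetical
# plugin name):
#
#     from wa.framework import pluginloader
#     cls = pluginloader.get_plugin_class('my_workload', kind='workload')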
# import sys class __LoaderWrapper(object): @property def kinds(self): if not self._loader: self.reset() return list(self._loader.kind_map.keys()) @property def kind_map(self): if not self._loader: self.reset() return self._loader.kind_map def __init__(self): self._loader = None def reset(self): # These imports cannot be done at top level, because of # sys.modules manipulation below # pylint: disable=import-outside-toplevel from wa.framework.plugin import PluginLoader from wa.framework.configuration.core import settings self._loader = PluginLoader(settings.plugin_packages, settings.plugin_paths, []) def update(self, packages=None, paths=None, ignore_paths=None): if not self._loader: self.reset() self._loader.update(packages, paths, ignore_paths) def reload(self): if not self._loader: self.reset() self._loader.reload() def list_plugins(self, kind=None): if not self._loader: self.reset() return self._loader.list_plugins(kind) def has_plugin(self, name, kind=None): if not self._loader: self.reset() return self._loader.has_plugin(name, kind) def get_plugin_class(self, name, kind=None): if not self._loader: self.reset() return self._loader.get_plugin_class(name, kind) def get_plugin(self, name=None, kind=None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg if not self._loader: self.reset() return self._loader.get_plugin(name=name, kind=kind, *args, **kwargs) def get_default_config(self, name): if not self._loader: self.reset() return self._loader.get_default_config(name) def resolve_alias(self, name): if not self._loader: self.reset() return self._loader.resolve_alias(name) def __getattr__(self, name): if not self._loader: self.reset() return getattr(self._loader, name) sys.modules[__name__] = __LoaderWrapper() ================================================ FILE: wa/framework/resource.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging import os import re from wa.framework import pluginloader from wa.framework.plugin import Plugin from wa.framework.exception import ResourceError from wa.framework.configuration import settings from wa.utils import log from wa.utils.android import get_cacheable_apk_info from wa.utils.misc import get_object_name from wa.utils.types import enum, list_or_string, prioritylist, version_tuple SourcePriority = enum(['package', 'remote', 'lan', 'local', 'perferred'], start=0, step=10) class __NullOwner(object): """Represents an owner for a resource not owned by anyone.""" name = 'noone' dependencies_directory = settings.dependencies_directory def __getattr__(self, name): return None def __str__(self): return 'no-one' __repr__ = __str__ NO_ONE = __NullOwner() class Resource(object): """ Represents a resource that needs to be resolved. This can be pretty much anything: a file, environment variable, a Python object, etc. The only thing a resource *has* to have is an owner (which would normally be the Workload/Instrument/Device/etc object that needs the resource). 
In addition, a resource may have any number of attributes to identify it; all of these are resource type specific. """ kind = None def __init__(self, owner=NO_ONE): self.owner = owner def match(self, path): return self.match_path(path) def match_path(self, path): raise NotImplementedError() def __str__(self): return '<{}\'s {}>'.format(self.owner, self.kind) class File(Resource): kind = 'file' def __init__(self, owner, path): super(File, self).__init__(owner) self.path = path def match_path(self, path): return self.path == path def __str__(self): return '<{}\'s {} {} file>'.format(self.owner, self.kind, self.path) class Executable(Resource): kind = 'executable' def __init__(self, owner, abi, filename): super(Executable, self).__init__(owner) self.abi = abi self.filename = filename def match_path(self, path): return self.filename == os.path.basename(path) def __str__(self): return '<{}\'s {} {} executable>'.format(self.owner, self.abi, self.filename) class ReventFile(Resource): kind = 'revent' def __init__(self, owner, stage, target): super(ReventFile, self).__init__(owner) self.stage = stage self.target = target def match_path(self, path): filename = os.path.basename(path) parts = filename.split('.') if len(parts) > 2: target, stage = parts[:2] return target == self.target and stage == self.stage else: stage = parts[0] return stage == self.stage class JarFile(Resource): kind = 'jar' def match_path(self, path): # An owner always has at most one jar file, so # always match return True class ApkFile(Resource): kind = 'apk' def __init__(self, owner, variant=None, version=None, package=None, uiauto=False, exact_abi=False, supported_abi=None, min_version=None, max_version=None): super(ApkFile, self).__init__(owner) self.variant = variant self.version = version self.max_version = max_version self.min_version = min_version self.package = package self.uiauto = uiauto self.exact_abi = exact_abi self.supported_abi = supported_abi def match_path(self, path): ext = os.path.splitext(path)[1].lower() return ext == '.apk' def match(self, path): name_matches = True version_matches = True version_range_matches = True package_matches = True abi_matches = True uiauto_matches = uiauto_test_matches(path, self.uiauto) if self.version: version_matches = apk_version_matches(path, self.version) if self.max_version or self.min_version: version_range_matches = apk_version_matches_range(path, self.min_version, self.max_version) if self.variant: name_matches = file_name_matches(path, self.variant) if self.package: package_matches = package_name_matches(path, self.package) if self.supported_abi: abi_matches = apk_abi_matches(path, self.supported_abi, self.exact_abi) return name_matches and version_matches and \ version_range_matches and uiauto_matches \ and package_matches and abi_matches def __str__(self): text = '<{}\'s apk'.format(self.owner) if self.variant: text += ' {}'.format(self.variant) if self.version: text += ' {}'.format(self.version) if self.uiauto: text += ' uiautomator test' text += '>' return text class ResourceGetter(Plugin): """ Base class for implementing resolvers. Defines the resolver interface. Resolvers are responsible for discovering resources (such as particular kinds of files) they know about based on the parameters that are passed to them. Each resolver also has a dict of attributes that describe its operation, and may be used to determine which getters get invoked. There is no pre-defined set of attributes and resolvers may define their own.
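As an illustration only, a minimal getter might look like the following (the class name and lookup logic are hypothetical; the registration pattern follows ``ResourceResolver.register`` defined below)::

    class LocalDirGetter(ResourceGetter):

        name = 'local_dir_getter'

        def register(self, resolver):
            # The registered callable is invoked with the Resource
            # being resolved; it returns a path, or None if this
            # getter could not find the resource.
            resolver.register(self.resolve, SourcePriority.local)

        def resolve(self, resource):
            return None  # perform the actual lookup here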
Class attributes: :name: Name that uniquely identifies this getter. Must be set by any concrete subclass. :priority: Priority with which this getter will be invoked. This should be one of the standard priorities specified in ``GetterPriority`` enumeration. If not set, this will default to ``GetterPriority.environment``. """ name = None kind = 'resource_getter' def register(self, resolver): raise NotImplementedError() def initialize(self): pass def __str__(self): return '<ResourceGetter {}>'.format(self.name) class ResourceResolver(object): """ Discovers and registers getters, and then handles requests for resources using registered getters. """ def __init__(self, loader=pluginloader): self.loader = loader self.logger = logging.getLogger('resolver') self.getters = [] self.sources = prioritylist() def load(self): for gettercls in self.loader.list_plugins('resource_getter'): self.logger.debug('Loading getter {}'.format(gettercls.name)) getter = self.loader.get_plugin(name=gettercls.name, kind="resource_getter") with log.indentcontext(): getter.initialize() getter.register(self) self.getters.append(getter) def register(self, source, priority=SourcePriority.local): msg = 'Registering "{}" with priority "{}"' self.logger.debug(msg.format(get_object_name(source), priority)) self.sources.add(source, priority) def get(self, resource, strict=True): """ Uses registered getters to attempt to discover a resource of the specified kind and matching the specified criteria. Returns path to the resource that has been discovered. If a resource has not been discovered, this will raise a ``ResourceError`` or, if ``strict`` has been set to ``False``, will return ``None``. """ self.logger.debug('Resolving {}'.format(resource)) for source in self.sources: source_name = get_object_name(source) self.logger.debug('Trying {}'.format(source_name)) result = source(resource) if result is not None: msg = 'Resource {} found using {}:' self.logger.debug(msg.format(resource, source_name)) self.logger.debug('\t{}'.format(result)) return result if strict: raise ResourceError('{} could not be found'.format(resource)) self.logger.debug('Resource {} not found.'.format(resource)) return None def apk_version_matches(path, version): version = list_or_string(version) info = get_cacheable_apk_info(path) for v in version: if v in (info.version_name, info.version_code): return True if loose_version_matching(v, info.version_name): return True return False def apk_version_matches_range(path, min_version=None, max_version=None): info = get_cacheable_apk_info(path) return range_version_matching(info.version_name, min_version, max_version) def range_version_matching(apk_version, min_version=None, max_version=None): if not apk_version: return False apk_version = version_tuple(apk_version) if max_version: max_version = version_tuple(max_version) if apk_version > max_version: return False if min_version: min_version = version_tuple(min_version) if apk_version < min_version: return False return True def loose_version_matching(config_version, apk_version): config_version = version_tuple(config_version) apk_version = version_tuple(apk_version) if len(apk_version) < len(config_version): return False # More specific version requested than available for i in range(len(config_version)): if config_version[i] != apk_version[i]: return False return True def file_name_matches(path, pattern): filename = os.path.basename(path) if pattern in filename: return True if re.search(pattern, filename): return True return False def uiauto_test_matches(path, uiauto): info =
get_cacheable_apk_info(path) return uiauto == ('com.arm.wa.uiauto' in info.package) def package_name_matches(path, package): info = get_cacheable_apk_info(path) return info.package == package def apk_abi_matches(path, supported_abi, exact_abi=False): supported_abi = list_or_string(supported_abi) info = get_cacheable_apk_info(path) # If no native code present, suitable for all devices. if not info.native_code: return True if exact_abi: # Only check primary return supported_abi[0] in info.native_code else: for abi in supported_abi: if abi in info.native_code: return True return False ================================================ FILE: wa/framework/run.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Because of use of Enum (dynamic attrs) # pylint: disable=no-member import uuid from collections import OrderedDict, Counter from copy import copy from datetime import datetime, timedelta from wa.framework.configuration.core import Status from wa.utils.serializer import Podable class RunInfo(Podable): """ Information about the current run, such as its unique ID, run time, etc. """ _pod_serialization_version = 1 @staticmethod def from_pod(pod): pod = RunInfo._upgrade_pod(pod) uid = pod.pop('uuid') _pod_version = pod.pop('_pod_version') duration = pod.pop('duration') if uid is not None: uid = uuid.UUID(uid) instance = RunInfo(**pod) instance._pod_version = _pod_version # pylint: disable=protected-access instance.uuid = uid instance.duration = duration if duration is None else timedelta(seconds=duration) return instance def __init__(self, run_name=None, project=None, project_stage=None, start_time=None, end_time=None, duration=None): super(RunInfo, self).__init__() self.uuid = uuid.uuid4() self.run_name = run_name self.project = project self.project_stage = project_stage self.start_time = start_time self.end_time = end_time self.duration = duration def to_pod(self): d = super(RunInfo, self).to_pod() d.update(copy(self.__dict__)) d['uuid'] = str(self.uuid) if self.duration is None: d['duration'] = self.duration else: d['duration'] = self.duration.total_seconds() return d @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) return pod class RunState(Podable): """ Represents the state of a WA run. 
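Instances can be round-tripped through plain-old-data ("pod") structures via ``to_pod()``/``from_pod()``; a minimal sketch::

    state = RunState()
    pod = state.to_pod()
    restored = RunState.from_pod(pod)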
""" _pod_serialization_version = 1 @staticmethod def from_pod(pod): instance = super(RunState, RunState).from_pod(pod) instance.status = Status.from_pod(pod['status']) instance.timestamp = pod['timestamp'] jss = [JobState.from_pod(j) for j in pod['jobs']] instance.jobs = OrderedDict(((js.id, js.iteration), js) for js in jss) return instance @property def num_completed_jobs(self): return sum(1 for js in self.jobs.values() if js.status > Status.RUNNING) def __init__(self): super(RunState, self).__init__() self.jobs = OrderedDict() self.status = Status.NEW self.timestamp = datetime.utcnow() def add_job(self, job): self.jobs[(job.state.id, job.state.iteration)] = job.state def get_status_counts(self): counter = Counter() for job_state in self.jobs.values(): counter[job_state.status] += 1 return counter def to_pod(self): pod = super(RunState, self).to_pod() pod['status'] = self.status.to_pod() pod['timestamp'] = self.timestamp pod['jobs'] = [j.to_pod() for j in self.jobs.values()] return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) pod['status'] = Status(pod['status']).to_pod() return pod class JobState(Podable): _pod_serialization_version = 1 @staticmethod def from_pod(pod): pod = JobState._upgrade_pod(pod) instance = JobState(pod['id'], pod['label'], pod['iteration'], Status.from_pod(pod['status'])) instance.retries = pod['retries'] instance.timestamp = pod['timestamp'] return instance @property def output_name(self): return '{}-{}-{}'.format(self.id, self.label, self.iteration) def __init__(self, id, label, iteration, status): # pylint: disable=redefined-builtin super(JobState, self).__init__() self.id = id self.label = label self.iteration = iteration self.status = status self.retries = 0 self.timestamp = datetime.utcnow() def to_pod(self): pod = super(JobState, self).to_pod() pod['id'] = self.id pod['label'] = self.label pod['iteration'] = self.iteration pod['status'] = self.status.to_pod() pod['retries'] = self.retries pod['timestamp'] = self.timestamp return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) pod['status'] = Status(pod['status']).to_pod() return pod ================================================ FILE: wa/framework/signal.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module wraps louie signalling mechanism. It relies on modified version of louie that has prioritization added to handler invocation. """ import sys import logging from contextlib import contextmanager from louie import dispatcher, saferef # pylint: disable=wrong-import-order from louie.dispatcher import _remove_receiver import wrapt from wa.utils.types import prioritylist, enum logger = logging.getLogger('signal') class Signal(object): """ This class implements the signals to be used for notifiying callbacks registered to respond to different states and stages of the execution of workload automation. 
""" def __init__(self, name, description='no description', invert_priority=False): """ Instantiates a Signal. :param name: name is the identifier of the Signal object. Signal instances with the same name refer to the same execution stage/stage. :param invert_priority: boolean parameter that determines whether multiple callbacks for the same signal should be ordered with ascending or descending priorities. Typically this flag should be set to True if the Signal is triggered AFTER an a state/stage has been reached. That way callbacks with high priorities will be called right after the event has occured. """ self.name = name self.description = description self.invert_priority = invert_priority def __str__(self): return self.name __repr__ = __str__ def __hash__(self): return id(self.name) # Signals associated with run-related events RUN_STARTED = Signal('run-started', 'sent at the beginning of the run') RUN_INITIALIZED = Signal('run-initialized', 'set after the run has been initialized') RUN_ABORTED = Signal('run-aborted', 'set when the run has been aborted due to a keyboard interrupt') RUN_FAILED = Signal('run-failed', 'set if the run has failed to complete all jobs.') RUN_FINALIZED = Signal('run-finalized', 'set after the run has been finalized') RUN_COMPLETED = Signal('run-completed', 'set upon completion of the run (regardless of whether or not it has failed') # Signals associated with job-related events JOB_STARTED = Signal('job-started', 'set when a a new job has been started') JOB_ABORTED = Signal('job-aborted', description=''' sent if a job has been aborted due to a keyboard interrupt. .. note:: While the status of every job that has not had a chance to run due to being interrupted will be set to "ABORTED", this signal will only be sent for the job that was actually running at the time. 
''') JOB_FAILED = Signal('job-failed', description='set if the job has failed') JOB_RESTARTED = Signal('job-restarted') JOB_COMPLETED = Signal('job-completed') # Signals associated with particular stages of workload execution BEFORE_WORKLOAD_INITIALIZED = Signal('before-workload-initialized', invert_priority=True) SUCCESSFUL_WORKLOAD_INITIALIZED = Signal('successful-workload-initialized') AFTER_WORKLOAD_INITIALIZED = Signal('after-workload-initialized') BEFORE_WORKLOAD_SETUP = Signal('before-workload-setup', invert_priority=True) SUCCESSFUL_WORKLOAD_SETUP = Signal('successful-workload-setup') AFTER_WORKLOAD_SETUP = Signal('after-workload-setup') BEFORE_WORKLOAD_EXECUTION = Signal('before-workload-execution', invert_priority=True) SUCCESSFUL_WORKLOAD_EXECUTION = Signal('successful-workload-execution') AFTER_WORKLOAD_EXECUTION = Signal('after-workload-execution') BEFORE_WORKLOAD_RESULT_EXTRACTION = Signal('before-workload-result-extraction', invert_priority=True) SUCCESSFUL_WORKLOAD_RESULT_EXTRACTION = Signal('successful-workload-result-extraction') AFTER_WORKLOAD_RESULT_EXTRACTION = Signal('after-workload-result-extraction') BEFORE_WORKLOAD_OUTPUT_UPDATE = Signal('before-workload-output-update', invert_priority=True) SUCCESSFUL_WORKLOAD_OUTPUT_UPDATE = Signal('successful-workload-output-update') AFTER_WORKLOAD_OUTPUT_UPDATE = Signal('after-workload-output-update') BEFORE_WORKLOAD_TEARDOWN = Signal('before-workload-teardown', invert_priority=True) SUCCESSFUL_WORKLOAD_TEARDOWN = Signal('successful-workload-teardown') AFTER_WORKLOAD_TEARDOWN = Signal('after-workload-teardown') BEFORE_WORKLOAD_FINALIZED = Signal('before-workload-finalized', invert_priority=True) SUCCESSFUL_WORKLOAD_FINALIZED = Signal('successful-workload-finalized') AFTER_WORKLOAD_FINALIZED = Signal('after-workload-finalized') # Signals indicating exceptional conditions ERROR_LOGGED = Signal('error-logged') WARNING_LOGGED = Signal('warning-logged') # These are paired events -- if the before_ event is sent, the after_ signal is # guaranteed to also be sent. In particular, the after_ signals will be sent # even if there is an error, so you cannot assume in the handler that the # device has booted successfully. In most cases, you should instead use the # non-paired signals below.
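# As a sketch, the wrap() context manager defined at the bottom of this
# module dispatches such a paired set by name:
#
#     with wrap('JOB', sender):  # sends BEFORE_JOB
#         execute_the_job()      # hypothetical work
#     # SUCCESSFUL_JOB is sent if no exception was raised;
#     # AFTER_JOB is always sent.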
BEFORE_RUN_INIT = Signal('before-run-init', invert_priority=True) SUCCESSFUL_RUN_INIT = Signal('successful-run-init') AFTER_RUN_INIT = Signal('after-run-init') BEFORE_JOB = Signal('before-job', invert_priority=True) SUCCESSFUL_JOB = Signal('successful-job') AFTER_JOB = Signal('after-job') BEFORE_JOB_QUEUE_EXECUTION = Signal('before-job-queue-execution', invert_priority=True) SUCCESSFUL_JOB_QUEUE_EXECUTION = Signal('successful-job-queue-execution') AFTER_JOB_QUEUE_EXECUTION = Signal('after-job-queue-execution') BEFORE_JOB_TARGET_CONFIG = Signal('before-job-target-config', invert_priority=True) SUCCESSFUL_JOB_TARGET_CONFIG = Signal('successful-job-target-config') AFTER_JOB_TARGET_CONFIG = Signal('after-job-target-config') BEFORE_JOB_OUTPUT_PROCESSED = Signal('before-job-output-processed', invert_priority=True) SUCCESSFUL_JOB_OUTPUT_PROCESSED = Signal('successful-job-output-processed') AFTER_JOB_OUTPUT_PROCESSED = Signal('after-job-output-processed') BEFORE_FLASHING = Signal('before-flashing', invert_priority=True) SUCCESSFUL_FLASHING = Signal('successful-flashing') AFTER_FLASHING = Signal('after-flashing') BEFORE_REBOOT = Signal('before-reboot', invert_priority=True) SUCCESSFUL_REBOOT = Signal('successful-reboot') AFTER_REBOOT = Signal('after-reboot') BEFORE_TARGET_CONNECT = Signal('before-target-connect', invert_priority=True) SUCCESSFUL_TARGET_CONNECT = Signal('successful-target-connect') AFTER_TARGET_CONNECT = Signal('after-target-connect') BEFORE_TARGET_DISCONNECT = Signal('before-target-disconnect', invert_priority=True) SUCCESSFUL_TARGET_DISCONNECT = Signal('successful-target-disconnect') AFTER_TARGET_DISCONNECT = Signal('after-target-disconnect') BEFORE_RUN_OUTPUT_PROCESSED = Signal( 'before-run-output-processed', invert_priority=True) SUCCESSFUL_RUN_OUTPUT_PROCESSED = Signal( 'successful-run-output-processed') AFTER_RUN_OUTPUT_PROCESSED = Signal( 'after-run-output-processed') CallbackPriority = enum(['extremely_low', 'very_low', 'low', 'normal', 'high', 'very_high', 'extremely_high'], -30, 10) class _prioritylist_wrapper(prioritylist): """ This adds a NOP append() method so that when louie invokes it to add the handler to receivers, nothing will happen; the handler is actually added inside the connect() below according to priority, before louie's connect() gets invoked. """ def append(self, *args, **kwargs): pass def connect(handler, signal, sender=dispatcher.Any, priority=0): """ Connects a callback to a signal, so that the callback will be automatically invoked when that signal is sent. Parameters: :handler: This can be any callable that takes the right arguments for the signal. For most signals this means a single argument that will be an ``ExecutionContext`` instance. But please see documentation for individual signals in the :ref:`signals reference `. :signal: The signal to which the handler will be subscribed. Please see :ref:`signals reference ` for the list of standard WA signals. .. note:: There is nothing that prevents instruments from sending their own signals that are not part of the standard set. However the signal must always be an :class:`wa.framework.signal.Signal` instance. :sender: The handler will be invoked only for the signals emitted by this sender. By default, this is set to :class:`louie.dispatcher.Any`, so the handler will be invoked for signals from any sender. :priority: An integer (positive or negative) that specifies the priority of the handler. Handlers with higher priority will be called before handlers with lower priority.
The call order of handlers with the same priority is not specified. Defaults to 0. .. note:: Priorities for some signals are inverted (so highest priority handlers get executed last). Please see :ref:`signals reference ` for details. """ logger.debug('Connecting {} to {}({}) with priority {}'.format(handler, signal, sender, priority)) if getattr(signal, 'invert_priority', False): priority = -priority senderkey = id(sender) if senderkey in dispatcher.connections: signals = dispatcher.connections[senderkey] else: dispatcher.connections[senderkey] = signals = {} if signal in signals: receivers = signals[signal] else: receivers = signals[signal] = _prioritylist_wrapper() dispatcher.connect(handler, signal, sender) receivers.add(saferef.safe_ref(handler, on_delete=_remove_receiver), priority) def disconnect(handler, signal, sender=dispatcher.Any): """ Disconnect a previously connected handler from the specified signal, optionally, only for the specified sender. Parameters: :handler: The callback to be disconnected. :signal: The signal the handler is to be disconnected from. It will be an :class:`wa.framework.signal.Signal` instance. :sender: If specified, the handler will only be disconnected from the signal sent by this sender. """ logger.debug('Disconnecting {} from {}({})'.format(handler, signal, sender)) dispatcher.disconnect(handler, signal, sender) def send(signal, sender=dispatcher.Anonymous, *args, **kwargs): """ Sends a signal, causing connected handlers to be invoked. Parameters: :signal: Signal to be sent. This must be an instance of :class:`wa.framework.signal.Signal` or its subclasses. :sender: The sender of the signal (typically, this would be ``self``). Some handlers may only be subscribed to signals from a particular sender. The rest of the parameters will be passed on as arguments to the handler. """ logger.debug('Sending {} from {}'.format(signal, sender)) return dispatcher.send(signal, sender, *args, **kwargs) # This will normally be set to log_error() by init_logging(); see wa.utils.log # Done this way to prevent a circular import dependency. log_error_func = logger.error def safe_send(signal, sender=dispatcher.Anonymous, propagate=None, *args, **kwargs): """ Same as ``send``, except this will catch and log all exceptions raised by handlers, except those specified in ``propagate`` argument (defaults to just ``[KeyboardInterrupt]``).
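For example, to dispatch a signal while logging, rather than propagating, any handler errors (a sketch)::

    safe_send(RUN_FAILED)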
""" if propagate is None: propagate = [KeyboardInterrupt] try: logger.debug('Safe-sending {} from {}'.format(signal, sender)) send(signal, sender, *args, **kwargs) except Exception as e: # pylint: disable=broad-except if any(isinstance(e, p) for p in propagate): raise e log_error_func(e) @contextmanager def wrap(signal_name, sender=dispatcher.Anonymous, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg """Wraps the suite in before/after signals, ensuring that after signal is always sent.""" safe = kwargs.pop('safe', False) signal_name = signal_name.upper().replace('-', '_') send_func = safe_send if safe else send try: before_signal = globals()['BEFORE_' + signal_name] success_signal = globals()['SUCCESSFUL_' + signal_name] after_signal = globals()['AFTER_' + signal_name] except KeyError: raise ValueError('Invalid wrapped signal name: {}'.format(signal_name)) try: send_func(before_signal, sender, *args, **kwargs) yield send_func(success_signal, sender, *args, **kwargs) finally: _, exc, _ = sys.exc_info() if exc: log_error_func(exc) send_func(after_signal, sender, *args, **kwargs) def wrapped(signal_name, sender=dispatcher.Anonymous, safe=False): """A decorator for wrapping function in signal dispatch.""" @wrapt.decorator def signal_wrapped(wrapped_func, _, args, kwargs): def signal_wrapper(*args, **kwargs): with wrap(signal_name, sender, safe): return wrapped_func(*args, **kwargs) return signal_wrapper(*args, **kwargs) return signal_wrapped ================================================ FILE: wa/framework/target/__init__.py ================================================ ================================================ FILE: wa/framework/target/assistant.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging import os import shutil import sys import tempfile import threading import time from wa.framework.exception import WorkerThreadError from wa.framework.plugin import Parameter from wa.utils.android import LogcatParser from wa.utils.misc import touch import wa.framework.signal as signal class LinuxAssistant(object): parameters = [] def __init__(self, target): self.target = target def initialize(self): pass def start(self): pass def extract_results(self, context): pass def stop(self): pass def finalize(self): pass class AndroidAssistant(object): parameters = [ Parameter('disable_selinux', kind=bool, default=True, description=""" If ``True``, the default, and the target is rooted, an attempt will be made to disable SELinux by running ``setenforce 0`` on the target at the beginning of the run. """), Parameter('logcat_poll_period', kind=int, constraint=lambda x: x > 0, description=""" Polling period for logcat in seconds. If not specified, no polling will be used. Logcat buffer on android is of limited size and it cannot be adjusted at run time. Depending on the amount of logging activity, the buffer may not be enought to capture comlete trace for a workload execution. 
For those situations, logcat may be polled periodically during the course of the run and stored in a temporary location on the host. Setting the value of the poll period enables this behavior. """), Parameter('stay_on_mode', kind=int, constraint=lambda x: 0 <= x <= 7, description=""" Specify whether the screen should stay on while the device is charging: ``0``: never stay on; ``1``: with AC charger; ``2``: with USB charger; ``4``: with wireless charger. Values can be OR-ed together to produce combinations, for instance ``7`` will cause the screen to stay on when charging under any method. """), ] def __init__(self, target, logcat_poll_period=None, disable_selinux=True, stay_on_mode=None): self.target = target self.logcat_poll_period = logcat_poll_period self.disable_selinux = disable_selinux self.stay_on_mode = stay_on_mode self.orig_stay_on_mode = self.target.get_stay_on_mode() if stay_on_mode is not None else None self.logcat_poller = None self.logger = logging.getLogger('logcat') self._logcat_marker_msg = None self._logcat_marker_tag = None signal.connect(self._before_workload, signal.BEFORE_WORKLOAD_EXECUTION) if self.logcat_poll_period: signal.connect(self._after_workload, signal.AFTER_WORKLOAD_EXECUTION) def initialize(self): if self.target.is_rooted and self.disable_selinux: self.do_disable_selinux() if self.stay_on_mode is not None: self.target.set_stay_on_mode(self.stay_on_mode) def start(self): if self.logcat_poll_period: self.logcat_poller = LogcatPoller(self.target, self.logcat_poll_period) self.logcat_poller.start() else: if not self._logcat_marker_msg: self._logcat_marker_msg = 'WA logcat marker for wrap detection' self._logcat_marker_tag = 'WAlog' def stop(self): if self.logcat_poller: self.logcat_poller.stop() def finalize(self): if self.stay_on_mode is not None: self.target.set_stay_on_mode(self.orig_stay_on_mode) def extract_results(self, context): logcat_file = os.path.join(context.output_directory, 'logcat.log') self.dump_logcat(logcat_file) context.add_artifact('logcat', logcat_file, kind='log') self.clear_logcat() if not self._check_logcat_nowrap(logcat_file): self.logger.warning('The main logcat buffer wrapped and lost data;' ' results that rely on this buffer may be' ' inaccurate or incomplete.' ) def dump_logcat(self, outfile): if self.logcat_poller: self.logcat_poller.write_log(outfile) else: self.target.dump_logcat(outfile, logcat_format='threadtime') def clear_logcat(self): if self.logcat_poller: self.logcat_poller.clear_buffer() else: self.target.clear_logcat() def _before_workload(self, _): if self.logcat_poller: self.logcat_poller.start_logcat_wrap_detect() else: self.insert_logcat_marker() def _after_workload(self, _): self.logcat_poller.stop_logcat_wrap_detect() def _check_logcat_nowrap(self, outfile): if self.logcat_poller: return self.logcat_poller.check_logcat_nowrap(outfile) else: parser = LogcatParser() for event in parser.parse(outfile): if (event.tag == self._logcat_marker_tag and event.message == self._logcat_marker_msg): return True return False def insert_logcat_marker(self): self.logger.debug('Inserting logcat marker') self.target.execute( 'log -t "{}" "{}"'.format( self._logcat_marker_tag, self._logcat_marker_msg ) ) def do_disable_selinux(self): # SELinux was added in Android 4.3 (API level 18). Trying to # 'getenforce' in earlier versions will produce an error.
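# Roughly, the check below amounts to running 'getenforce' as root and,
# if it reports 'Enforcing', running 'setenforce 0' to switch the target
# to permissive mode.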
if self.target.get_sdk_version() >= 18: se_status = self.target.execute('getenforce', as_root=True).strip() if se_status == 'Enforcing': self.target.execute('setenforce 0', as_root=True, check_exit_code=False) class LogcatPoller(threading.Thread): def __init__(self, target, period=60, timeout=30): super(LogcatPoller, self).__init__() self.target = target self.logger = logging.getLogger('logcat') self.period = period self.timeout = timeout self.stop_signal = threading.Event() self.lock = threading.RLock() self.buffer_file = tempfile.mktemp() self.last_poll = 0 self.daemon = True self.exc = None self._logcat_marker_tag = 'WALog' self._logcat_marker_msg = 'WA logcat marker for wrap detection:{}' self._marker_count = 0 self._start_marker = None self._end_marker = None def run(self): self.logger.debug('Starting polling') try: self.insert_logcat_marker() while True: if self.stop_signal.is_set(): break with self.lock: current_time = time.time() if (current_time - self.last_poll) >= self.period: self.poll() self.insert_logcat_marker() time.sleep(0.5) except Exception: # pylint: disable=W0703 self.exc = WorkerThreadError(self.name, sys.exc_info()) self.logger.debug('Polling stopped') def stop(self): self.logger.debug('Stopping logcat polling') self.stop_signal.set() self.join(self.timeout) if self.is_alive(): self.logger.error('Could not join logcat poller thread.') if self.exc: raise self.exc # pylint: disable=E0702 def clear_buffer(self): self.logger.debug('Clearing logcat buffer') with self.lock: self.target.clear_logcat() touch(self.buffer_file) def write_log(self, outfile): with self.lock: self.poll() if os.path.isfile(self.buffer_file): shutil.copy(self.buffer_file, outfile) else: # there was no logcat trace at this time touch(outfile) def close(self): self.logger.debug('Closing poller') if os.path.isfile(self.buffer_file): os.remove(self.buffer_file) def poll(self): self.last_poll = time.time() self.target.dump_logcat(self.buffer_file, append=True, timeout=self.timeout, logcat_format='threadtime') self.target.clear_logcat() def insert_logcat_marker(self): self.logger.debug('Inserting logcat marker') with self.lock: self.target.execute( 'log -t "{}" "{}"'.format( self._logcat_marker_tag, self._logcat_marker_msg.format(self._marker_count) ) ) self._marker_count += 1 def check_logcat_nowrap(self, outfile): parser = LogcatParser() counter = self._start_marker for event in parser.parse(outfile): message = self._logcat_marker_msg.split(':')[0] if not (event.tag == self._logcat_marker_tag and event.message.split(':')[0] == message): continue number = int(event.message.split(':')[1]) if number > counter: return False elif number == counter: counter += 1 if counter == self._end_marker: return True return False def start_logcat_wrap_detect(self): with self.lock: self._start_marker = self._marker_count self.insert_logcat_marker() def stop_logcat_wrap_detect(self): with self.lock: self._end_marker = self._marker_count class ChromeOsAssistant(LinuxAssistant): parameters = LinuxAssistant.parameters + AndroidAssistant.parameters def __init__(self, target, logcat_poll_period=None, disable_selinux=True): super(ChromeOsAssistant, self).__init__(target) if target.supports_android: self.android_assistant = AndroidAssistant(target.android_container, logcat_poll_period, disable_selinux) else: self.android_assistant = None def start(self): super(ChromeOsAssistant, self).start() if self.android_assistant: self.android_assistant.start() def extract_results(self, context): super(ChromeOsAssistant, 
self).extract_results(context) if self.android_assistant: self.android_assistant.extract_results(context) def stop(self): super(ChromeOsAssistant, self).stop() if self.android_assistant: self.android_assistant.stop() ================================================ FILE: wa/framework/target/config.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from copy import copy class TargetConfig(dict): """ Represents a configuration for a target. """ def __init__(self, config=None): dict.__init__(self) if isinstance(config, TargetConfig): self.__dict__ = copy(config.__dict__) elif hasattr(config, 'items'): for k, v in config.items(): self.set(k, v) elif config: raise ValueError(config) def set(self, name, value): setattr(self, name, value) ================================================ FILE: wa/framework/target/descriptor.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
# import inspect from devlib import (LinuxTarget, AndroidTarget, LocalLinuxTarget, ChromeOsTarget, Platform, Juno, TC2, Gem5SimulationPlatform, AdbConnection, SshConnection, LocalConnection, TelnetConnection, Gem5Connection) from devlib.target import DEFAULT_SHELL_PROMPT from devlib.utils.ssh import DEFAULT_SSH_SUDO_COMMAND from wa.framework import pluginloader from wa.framework.configuration.core import get_config_point_map from wa.framework.exception import PluginLoaderError from wa.framework.plugin import Plugin, Parameter from wa.framework.target.assistant import LinuxAssistant, AndroidAssistant, ChromeOsAssistant from wa.utils.types import list_of_strings, list_of_ints, regex, identifier, caseless_string from wa.utils.misc import isiterable def list_target_descriptions(loader=pluginloader): targets = {} for cls in loader.list_target_descriptors(): descriptor = cls() for desc in descriptor.get_descriptions(): if desc.name in targets: msg = 'Duplicate target "{}" returned by {} and {}' prev_dtor = targets[desc.name].source raise PluginLoaderError(msg.format(desc.name, prev_dtor.name, descriptor.name)) targets[desc.name] = desc return list(targets.values()) def get_target_description(name, loader=pluginloader): for tdesc in list_target_descriptions(loader): if tdesc.name == name: return tdesc raise ValueError('Could not find target descriptor "{}"'.format(name)) def instantiate_target(tdesc, params, connect=None, extra_platform_params=None): # pylint: disable=too-many-locals,too-many-branches target_params = get_config_point_map(tdesc.target_params) platform_params = get_config_point_map(tdesc.platform_params) conn_params = get_config_point_map(tdesc.conn_params) assistant_params = get_config_point_map(tdesc.assistant_params) tp, pp, cp = {}, {}, {} for supported_params, new_params in (target_params, tp), (platform_params, pp), (conn_params, cp): for name, value in supported_params.items(): if value.default and name == value.name: new_params[name] = value.default for name, value in params.items(): if name in target_params: if not target_params[name].deprecated: tp[name] = value elif name in platform_params: if not platform_params[name].deprecated: pp[name] = value elif name in conn_params: if not conn_params[name].deprecated: cp[name] = value elif name in assistant_params: pass else: msg = 'Unexpected parameter for {}: {}' raise ValueError(msg.format(tdesc.name, name)) for pname, pval in (extra_platform_params or {}).items(): if pname in pp: raise RuntimeError('Platform parameter clash: {}'.format(pname)) pp[pname] = pval tp['platform'] = (tdesc.platform or Platform)(**pp) if cp: tp['connection_settings'] = cp if tdesc.connection: tp['conn_cls'] = tdesc.connection if connect is not None: tp['connect'] = connect return tdesc.target(**tp) def instantiate_assistant(tdesc, params, target): assistant_params = {} for param in tdesc.assistant_params: if param.name in params: assistant_params[param.name] = params[param.name] elif param.default: assistant_params[param.name] = param.default return tdesc.assistant(target, **assistant_params) class TargetDescription(object): def __init__(self, name, source, description=None, target=None, platform=None, conn=None, assistant=None, target_params=None, platform_params=None, conn_params=None, assistant_params=None): self.name = name self.source = source self.description = description self.target = target self.platform = platform self.connection = conn self.assistant = assistant self._set('target_params', target_params) self._set('platform_params', 
platform_params) self._set('conn_params', conn_params) self._set('assistant_params', assistant_params) def get_default_config(self): param_attrs = ['target_params', 'platform_params', 'conn_params', 'assistant_params'] config = {} for pattr in param_attrs: for p in getattr(self, pattr): if not p.deprecated: config[p.name] = p.default return config def _set(self, attr, vals): if vals is None: vals = [] elif isiterable(vals): if hasattr(vals, 'values'): vals = list(vals.values()) else: msg = '{} must be iterable; got "{}"' raise ValueError(msg.format(attr, vals)) setattr(self, attr, vals) class TargetDescriptor(Plugin): kind = 'target_descriptor' def get_descriptions(self): # pylint: disable=no-self-use return [] COMMON_TARGET_PARAMS = [ Parameter('working_directory', kind=str, description=''' On-target working directory that will be used by WA. This directory must be writable by the user WA logs in as without the need for privilege elevation. '''), Parameter('executables_directory', kind=str, description=''' On-target directory where WA will install its executable binaries. This location must allow execution. This location does *not* need to be writable by unprivileged users or rooted devices (WA will install with elevated privileges as necessary). '''), Parameter('modules', kind=list, description=''' A list of additional modules to be installed for the target. ``devlib`` implements functionality for particular subsystems as modules. A number of "default" modules (e.g. for cpufreq subsystem) are loaded automatically, unless explicitly disabled. If additional modules need to be loaded, they may be specified using this parameter. Please see ``devlib`` documentation for information on the available modules. '''), Parameter('load_default_modules', kind=bool, default=True, description=''' A number of modules (e.g. for working with the cpufreq subsystem) are loaded by default when a Target is instantiated. Setting this to ``False`` will suppress that, ensuring that only the base Target interface is initialized. You may want to set this to ``False`` if there is a problem with one or more default modules on your platform (e.g. your device is unrooted and cpufreq is not accessible to unprivileged users), or if ``Target`` initialization is taking too long for your platform. '''), Parameter('shell_prompt', kind=regex, default=DEFAULT_SHELL_PROMPT, description=''' A regex that matches the shell prompt on the target. '''), Parameter('max_async', kind=int, default=50, description=''' The maximum number of concurrent asynchronous connections to the target maintained at any time. '''), ] COMMON_PLATFORM_PARAMS = [ Parameter('core_names', kind=list_of_strings, description=''' List of names of CPU cores in the order that they appear to the kernel. If not specified, it will be inferred from the platform. '''), Parameter('core_clusters', kind=list_of_ints, description=''' Cluster mapping corresponding to the cores in ``core_names``. Cluster indexing starts at ``0``. If not specified, this will be inferred from ``core_names`` -- consecutive cores with the same name will be assumed to share a cluster. '''), Parameter('big_core', kind=str, description=''' The name of the big cores in a big.LITTLE system. If not specified, this will be inferred, either from the name (if one of the names in ``core_names`` matches known big cores), or by assuming that the last cluster is big. '''), Parameter('model', kind=str, description=''' Hardware model of the platform.
If not specified, an attempt will be made to read it from target. '''), Parameter('modules', kind=list, description=''' An additional list of modules to be loaded into the target. '''), ] VEXPRESS_PLATFORM_PARAMS = [ Parameter('serial_port', kind=str, description=''' The serial device/port on the host for the initial connection to the target (used for early boot, flashing, etc). '''), Parameter('baudrate', kind=int, description=''' Baud rate for the serial connection. '''), Parameter('vemsd_mount', kind=str, description=''' VExpress MicroSD card mount location. This is a MicroSD card in the VExpress device that is mounted on the host via USB. The card contains configuration files for the platform and firmware and kernel images to be flashed. '''), Parameter('bootloader', kind=str, allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'], description=''' Selects the bootloader mechanism used by the board. Depending on firmware version, a number of possible boot mechanisms may be used. Please see ``devlib`` documentation for descriptions. '''), Parameter('hard_reset_method', kind=str, allowed_values=['dtr', 'reboottxt'], description=''' There are a couple of ways to reset the VersatileExpress board if the software running on the board becomes unresponsive. Both require configuration to be enabled (please see ``devlib`` documentation). ``dtr``: toggle the DTR line on the serial connection ``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount. '''), ] GEM5_PLATFORM_PARAMS = [ Parameter('gem5_bin', kind=str, mandatory=True, description=''' Path to the gem5 binary '''), Parameter('gem5_args', kind=str, mandatory=True, description=''' Arguments to be passed to the gem5 binary '''), Parameter('gem5_virtio', kind=str, mandatory=True, description=''' VirtIO device setup arguments to be passed to gem5. VirtIO is used to transfer files between the simulation and the host. '''), Parameter('name', kind=str, default='gem5', description=''' The name for the gem5 "device". '''), ] CONNECTION_PARAMS = { AdbConnection: [ Parameter( 'device', kind=str, aliases=['adb_name'], description=""" ADB device name """), Parameter( 'adb_server', kind=str, description=""" ADB server to connect to. """), Parameter( 'adb_port', kind=int, description=""" ADB port to connect to. """), Parameter( 'poll_transfers', kind=bool, default=True, description=""" File transfers will be polled for activity. Inactive file transfers are cancelled. """), Parameter( 'start_transfer_poll_delay', kind=int, default=30, description=""" How long to wait (s) for a transfer to complete before polling transfer activity. Requires ``poll_transfers`` to be set. """), Parameter( 'total_transfer_timeout', kind=int, default=3600, description=""" The total time to elapse before a transfer is cancelled, regardless of its activity. Requires ``poll_transfers`` to be set. """), Parameter( 'transfer_poll_period', kind=int, default=30, description=""" The period at which transfer activity is sampled. Requires ``poll_transfers`` to be set. Too small values may cause the destination size to appear the same over one or more sample periods, causing improper transfer cancellation. """), Parameter( 'adb_as_root', kind=bool, default=False, description=""" Specify whether the adb server should be started in root mode. """) ], SshConnection: [ Parameter( 'host', kind=str, mandatory=True, description=""" Host name or IP address of the target.
"""), Parameter( 'username', kind=str, mandatory=True, description=""" User name to connect with """), Parameter( 'password', kind=str, description=""" Password to use. (When connecting to a passwordless machine set to an empty string to prevent attempting ssh key authentication.) """), Parameter( 'keyfile', kind=str, description=""" Key file to use """), Parameter( 'port', kind=int, default=22, description=""" The port SSH server is listening on on the target. """), Parameter( 'strict_host_check', kind=bool, default=False, description=""" Specify whether devices should be connected to if their host key does not match the systems known host keys. """), Parameter( 'sudo_cmd', kind=str, default=DEFAULT_SSH_SUDO_COMMAND, description=""" Sudo command to use. Must have ``{}`` specified somewhere in the string it indicate where the command to be run via sudo is to go. """), Parameter( 'use_scp', kind=bool, default=False, description=""" Allow using SCP as method of file transfer instead of the default SFTP. """), Parameter( 'poll_transfers', kind=bool, default=True, description=""" File transfers will be polled for activity. Inactive file transfers are cancelled. """), Parameter( 'start_transfer_poll_delay', kind=int, default=30, description=""" How long to wait (s) for a transfer to complete before polling transfer activity. Requires ``poll_transfers`` to be set. """), Parameter( 'total_transfer_timeout', kind=int, default=3600, description=""" The total time to elapse before a transfer is cancelled, regardless of its activity. Requires ``poll_transfers`` to be set. """), Parameter( 'transfer_poll_period', kind=int, default=30, description=""" The period at which transfer activity is sampled. Requires ``poll_transfers`` to be set. Too small values may cause the destination size to appear the same over one or more sample periods, causing improper transfer cancellation. """), # Deprecated Parameters Parameter( 'telnet', kind=str, description=""" Original shell prompt to expect. """, deprecated=True), Parameter( 'password_prompt', kind=str, description=""" Password prompt to expect """, deprecated=True), Parameter( 'original_prompt', kind=str, description=""" Original shell prompt to expect. """, deprecated=True), ], TelnetConnection: [ Parameter( 'host', kind=str, mandatory=True, description=""" Host name or IP address of the target. """), Parameter( 'username', kind=str, mandatory=True, description=""" User name to connect with """), Parameter( 'password', kind=str, description=""" Password to use. """), Parameter( 'port', kind=int, description=""" The port SSH server is listening on on the target. """), Parameter( 'password_prompt', kind=str, description=""" Password prompt to expect """), Parameter( 'original_prompt', kind=str, description=""" Original shell prompt to expect. """), Parameter( 'sudo_cmd', kind=str, default="sudo -- sh -c {}", description=""" Sudo command to use. Must have ``{}`` specified somewhere in the string it indicate where the command to be run via sudo is to go. """), ], Gem5Connection: [ Parameter( 'host', kind=str, mandatory=False, description=""" Host name or IP address of the target. """), Parameter( 'username', kind=str, default='root', description=""" User name to connect to gem5 simulation. """), Parameter( 'password', kind=str, description=""" Password to use. """), Parameter( 'port', kind=int, description=""" The port SSH server is listening on on the target. 
"""), Parameter( 'password_prompt', kind=str, description=""" Password prompt to expect """), Parameter( 'original_prompt', kind=str, description=""" Original shell prompt to expect. """), ], LocalConnection: [ Parameter( 'password', kind=str, description=""" Password to use for sudo. if not specified, the user will be prompted during intialization. """), Parameter( 'keep_password', kind=bool, default=True, description=""" If ``True`` (the default), the password will be cached in memory after it is first obtained from the user, so that the user would not be prompted for it again. """), Parameter( 'unrooted', kind=bool, default=False, description=""" Indicate that the target should be considered unrooted; do not attempt sudo or ask the user for their password. """), ], } CONNECTION_PARAMS['ChromeOsConnection'] = \ CONNECTION_PARAMS[AdbConnection] + CONNECTION_PARAMS[SshConnection] # name --> ((target_class, conn_class, unsupported_platforms), params_list, defaults) TARGETS = { 'linux': ((LinuxTarget, SshConnection, []), COMMON_TARGET_PARAMS, None), 'android': ((AndroidTarget, AdbConnection, []), COMMON_TARGET_PARAMS + [Parameter('package_data_directory', kind=str, default='/data/data', description=''' Directory containing Android data '''), ], None), 'chromeos': ((ChromeOsTarget, 'ChromeOsConnection', []), COMMON_TARGET_PARAMS + [Parameter('package_data_directory', kind=str, default='/data/data', description=''' Directory containing Android data '''), Parameter('android_working_directory', kind=str, description=''' On-target working directory that will be used by WA for the android container. This directory must be writable by the user WA logs in as without the need for privilege elevation. '''), Parameter('android_executables_directory', kind=str, description=''' On-target directory where WA will install its executable binaries for the android container. This location must allow execution. This location does *not* need to be writable by unprivileged users or rooted devices (WA will install with elevated privileges as necessary). directory must be writable by the user WA logs in as without the need for privilege elevation. '''), ], None), 'local': ((LocalLinuxTarget, LocalConnection, [Juno, Gem5SimulationPlatform, TC2]), COMMON_TARGET_PARAMS, None), } # name --> assistant ASSISTANTS = { 'linux': LinuxAssistant, 'android': AndroidAssistant, 'local': LinuxAssistant, 'chromeos': ChromeOsAssistant } # Platform specific parameter overrides. JUNO_PLATFORM_OVERRIDES = [ Parameter('baudrate', kind=int, default=115200, description=''' Baud rate for the serial connection. '''), Parameter('vemsd_mount', kind=str, default='/media/JUNO', description=''' VExpress MicroSD card mount location. This is a MicroSD card in the VExpress device that is mounted on the host via USB. The card contains configuration files for the platform and firmware and kernel images to be flashed. '''), Parameter('bootloader', kind=str, default='u-boot', allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'], description=''' Selects the bootloader mechanism used by the board. Depending on firmware version, a number of possible boot mechanisms may be use. Please see ``devlib`` documentation for descriptions. '''), Parameter('hard_reset_method', kind=str, default='dtr', allowed_values=['dtr', 'reboottxt'], description=''' There are a couple of ways to reset VersatileExpress board if the software running on the board becomes unresponsive. Both require configuration to be enabled (please see ``devlib`` documentation). 
``dtr``: toggle the DTR line on the serial connection ``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount. '''), ] TC2_PLATFORM_OVERRIDES = [ Parameter('baudrate', kind=int, default=38400, description=''' Baud rate for the serial connection. '''), Parameter('vemsd_mount', kind=str, default='/media/VEMSD', description=''' VExpress MicroSD card mount location. This is a MicroSD card in the VExpress device that is mounted on the host via USB. The card contains configuration files for the platform and firmware and kernel images to be flashed. '''), Parameter('bootloader', kind=str, default='bootmon', allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'], description=''' Selects the bootloader mechanism used by the board. Depending on firmware version, a number of possible boot mechanisms may be used. Please see ``devlib`` documentation for descriptions. '''), Parameter('hard_reset_method', kind=str, default='reboottxt', allowed_values=['dtr', 'reboottxt'], description=''' There are a couple of ways to reset the VersatileExpress board if the software running on the board becomes unresponsive. Both require configuration to be enabled (please see ``devlib`` documentation). ``dtr``: toggle the DTR line on the serial connection ``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount. '''), ] # name --> ((platform_class, conn_class, conn_overrides), params_list, defaults, target_overrides) # Note: normally, connection is defined by the Target name, but # platforms may choose to override it # Note: the target_overrides allows you to override common target_params for a # particular platform. Parameters you can override are in COMMON_TARGET_PARAMS # Example of overriding one of the target parameters: Replace last `None` with # a list of `Parameter` objects to be used instead. PLATFORMS = { 'generic': ((Platform, None, None), COMMON_PLATFORM_PARAMS, None, None), 'juno': ((Juno, None, [ Parameter('host', kind=str, mandatory=False, description="Host name or IP address of the target."), ] ), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS, JUNO_PLATFORM_OVERRIDES, None), 'tc2': ((TC2, None, None), COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS, TC2_PLATFORM_OVERRIDES, None), 'gem5': ((Gem5SimulationPlatform, Gem5Connection, None), GEM5_PLATFORM_PARAMS, None, None), } class DefaultTargetDescriptor(TargetDescriptor): name = 'devlib_targets' description = """ The default target descriptor that provides descriptions in the form ``<platform>_<target>``. These map directly onto ``Target``\ s and ``Platform``\ s supplied by ``devlib``.
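For example, ``generic_android`` pairs the ``generic`` platform with the ``android`` target, and ``juno_linux`` pairs ``juno`` with ``linux``.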
""" def get_descriptions(self): # pylint: disable=attribute-defined-outside-init,too-many-locals result = [] for target_name, target_tuple in TARGETS.items(): (target, conn, unsupported_platforms), target_params = self._get_item(target_tuple) assistant = ASSISTANTS[target_name] conn_params = CONNECTION_PARAMS[conn] for platform_name, platform_tuple in PLATFORMS.items(): platform_target_defaults = platform_tuple[-1] platform_tuple = platform_tuple[0:-1] (platform, plat_conn, conn_defaults), platform_params = self._get_item(platform_tuple) if platform in unsupported_platforms: continue # Add target defaults specified in the Platform tuple target_params = self._override_params(target_params, platform_target_defaults) name = '{}_{}'.format(platform_name, target_name) td = TargetDescription(name, self) td.target = target td.platform = platform td.assistant = assistant td.target_params = target_params td.platform_params = platform_params td.assistant_params = assistant.parameters if plat_conn: td.conn = plat_conn td.conn_params = self._override_params(CONNECTION_PARAMS[plat_conn], conn_defaults) else: td.conn = conn td.conn_params = self._override_params(conn_params, conn_defaults) result.append(td) return result def _override_params(self, params, overrides): # pylint: disable=no-self-use ''' Returns a new list of parameters replacing any parameter with the corresponding parameter in overrides''' if not overrides: return params param_map = {p.name: p for p in params} for override in overrides: if override.name in param_map: param_map[override.name] = override # Return the list of overriden parameters return list(param_map.values()) def _get_item(self, item_tuple): cls_tuple, params, defaults = item_tuple updated_params = self._override_params(params, defaults) return cls_tuple, updated_params _adhoc_target_descriptions = [] def create_target_description(name, *args, **kwargs): name = identifier(name) for td in _adhoc_target_descriptions: if caseless_string(name) == td.name: msg = 'Target with name "{}" already exists (from source: {})' raise ValueError(msg.format(name, td.source)) stack = inspect.stack() # inspect.stack() returns a list of call frame records for the current thread # in reverse call order. So the first entry is for the current frame and next one # for the immediate caller. Each entry is a tuple in the format # (frame_object, module_path, line_no, function_name, source_lines, source_lines_index) # # Here we assign the path of the calling module as the "source" for this description. # because this might be invoked via the add_scription_for_target wrapper, we need to # check for that, and make sure that we get the info for *its* caller in that case. 
if stack[1][3] == 'add_description_for_target': source = stack[2][1] else: source = stack[1][1] _adhoc_target_descriptions.append(TargetDescription(name, source, *args, **kwargs)) def _get_target_defaults(target): specificity = 0 res = ('linux', TARGETS['linux']) # fallback to a generic linux target for name, ttup in TARGETS.items(): if issubclass(target, ttup[0][0]): new_spec = len(inspect.getmro(ttup[0][0])) if new_spec > specificity: res = (name, ttup) specificity = new_spec return res def add_description_for_target(target, description=None, **kwargs): (base_name, ((_, base_conn, _), base_params, _)) = _get_target_defaults(target) if 'target_params' not in kwargs: kwargs['target_params'] = base_params if 'platform' not in kwargs: kwargs['platform'] = Platform if 'platform_params' not in kwargs: for (plat, conn, _), params, _, _ in PLATFORMS.values(): if plat == kwargs['platform']: kwargs['platform_params'] = params if conn is not None and kwargs['conn'] is None: kwargs['conn'] = conn break if 'conn' not in kwargs: kwargs['conn'] = base_conn if 'conn_params' not in kwargs: kwargs['conn_params'] = CONNECTION_PARAMS.get(kwargs['conn']) if 'assistant' not in kwargs: kwargs['assistant'] = ASSISTANTS.get(base_name) create_target_description(target.name, target=target, description=description, **kwargs) class SimpleTargetDescriptor(TargetDescriptor): name = 'adhoc_targets' description = """ Returns target descriptions added with ``create_target_description``. """ def get_descriptions(self): return _adhoc_target_descriptions ================================================ FILE: wa/framework/target/info.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
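
# The classes below follow the "pod" (plain-old-data) serialization pattern
# used throughout WA: to_pod() reduces an object to primitives that can be
# written out as JSON/YAML, from_pod() rebuilds it, and older pods are
# migrated via the _pod_upgrade_vN chain. A minimal round-trip sketch, with
# illustrative values, using the CpufreqInfo class defined below:
#
#     info = CpufreqInfo(driver='cpufreq-dt', related_cpus=[0, 1, 2, 3])
#     pod = info.to_pod()                  # plain dict, carries '_pod_version'
#     restored = CpufreqInfo.from_pod(pod)
#     assert restored.driver == 'cpufreq-dt'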
# # pylint: disable=protected-access import os from devlib import AndroidTarget, TargetError from devlib.target import KernelConfig, KernelVersion, Cpuinfo from devlib.utils.android import AndroidProperties from wa.framework.configuration.core import settings from wa.framework.exception import ConfigError from wa.utils.serializer import read_pod, write_pod, Podable from wa.utils.misc import atomic_write_path def cpuinfo_from_pod(pod): cpuinfo = Cpuinfo('') cpuinfo.sections = pod['cpuinfo'] lines = [] for section in cpuinfo.sections: for key, value in section.items(): line = '{}: {}'.format(key, value) lines.append(line) lines.append('') cpuinfo.text = '\n'.join(lines) return cpuinfo def kernel_version_from_pod(pod): release_string = pod['kernel_release'] version_string = pod['kernel_version'] if release_string: if version_string: kernel_string = '{} #{}'.format(release_string, version_string) else: kernel_string = release_string else: kernel_string = '#{}'.format(version_string) return KernelVersion(kernel_string) def kernel_config_from_pod(pod): config = KernelConfig('') config.typed_config._config = pod['kernel_config'] lines = [] for key, value in config.items(): if value == 'n': lines.append('# {} is not set'.format(key)) else: lines.append('{}={}'.format(key, value)) config.text = '\n'.join(lines) return config class CpufreqInfo(Podable): _pod_serialization_version = 1 @staticmethod def from_pod(pod): pod = CpufreqInfo._upgrade_pod(pod) return CpufreqInfo(**pod) def __init__(self, **kwargs): super(CpufreqInfo, self).__init__() self.available_frequencies = kwargs.pop('available_frequencies', []) self.available_governors = kwargs.pop('available_governors', []) self.related_cpus = kwargs.pop('related_cpus', []) self.driver = kwargs.pop('driver', None) self._pod_version = kwargs.pop('_pod_version', self._pod_serialization_version) def to_pod(self): pod = super(CpufreqInfo, self).to_pod() pod.update(self.__dict__) return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) return pod def __repr__(self): return 'Cpufreq({} {})'.format(self.driver, self.related_cpus) __str__ = __repr__ class IdleStateInfo(Podable): _pod_serialization_version = 1 @staticmethod def from_pod(pod): pod = IdleStateInfo._upgrade_pod(pod) return IdleStateInfo(**pod) def __init__(self, **kwargs): super(IdleStateInfo, self).__init__() self.name = kwargs.pop('name', None) self.desc = kwargs.pop('desc', None) self.power = kwargs.pop('power', None) self.latency = kwargs.pop('latency', None) self._pod_version = kwargs.pop('_pod_version', self._pod_serialization_version) def to_pod(self): pod = super(IdleStateInfo, self).to_pod() pod.update(self.__dict__) return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) return pod def __repr__(self): return 'IdleState({}/{})'.format(self.name, self.desc) __str__ = __repr__ class CpuidleInfo(Podable): _pod_serialization_version = 1 @staticmethod def from_pod(pod): pod = CpuidleInfo._upgrade_pod(pod) instance = CpuidleInfo() instance._pod_version = pod['_pod_version'] instance.governor = pod['governor'] instance.driver = pod['driver'] instance.states = [IdleStateInfo.from_pod(s) for s in pod['states']] return instance @property def num_states(self): return len(self.states) def __init__(self): super(CpuidleInfo, self).__init__() self.governor = None self.driver = None self.states = [] def to_pod(self): pod = super(CpuidleInfo, self).to_pod() pod['governor'] = self.governor pod['driver'] = self.driver 
pod['states'] = [s.to_pod() for s in self.states] return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) return pod def __repr__(self): return 'Cpuidle({}/{} {} states)'.format( self.governor, self.driver, self.num_states) __str__ = __repr__ class CpuInfo(Podable): _pod_serialization_version = 1 @staticmethod def from_pod(pod): instance = super(CpuInfo, CpuInfo).from_pod(pod) instance.id = pod['id'] instance.name = pod['name'] instance.architecture = pod['architecture'] instance.features = pod['features'] instance.cpufreq = CpufreqInfo.from_pod(pod['cpufreq']) instance.cpuidle = CpuidleInfo.from_pod(pod['cpuidle']) return instance def __init__(self): super(CpuInfo, self).__init__() self.id = None self.name = None self.architecture = None self.features = [] self.cpufreq = CpufreqInfo() self.cpuidle = CpuidleInfo() def to_pod(self): pod = super(CpuInfo, self).to_pod() pod['id'] = self.id pod['name'] = self.name pod['architecture'] = self.architecture pod['features'] = self.features pod['cpufreq'] = self.cpufreq.to_pod() pod['cpuidle'] = self.cpuidle.to_pod() return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) return pod def __repr__(self): return 'Cpu({} {})'.format(self.id, self.name) __str__ = __repr__ def get_target_info(target): info = TargetInfo() info.target = target.__class__.__name__ info.modules = target.modules info.os = target.os info.os_version = target.os_version info.system_id = target.system_id info.abi = target.abi info.is_rooted = target.is_rooted info.kernel_version = target.kernel_version info.kernel_config = target.config info.hostname = target.hostname info.hostid = target.hostid try: info.sched_features = target.read_value('/sys/kernel/debug/sched_features').split() except TargetError: # best effort -- debugfs might not be mounted pass for i, name in enumerate(target.cpuinfo.cpu_names): cpu = CpuInfo() cpu.id = i cpu.name = name cpu.features = target.cpuinfo.get_cpu_features(i) cpu.architecture = target.cpuinfo.architecture if target.has('cpufreq'): cpu.cpufreq.available_governors = target.cpufreq.list_governors(i) cpu.cpufreq.available_frequencies = target.cpufreq.list_frequencies(i) cpu.cpufreq.related_cpus = target.cpufreq.get_related_cpus(i) cpu.cpufreq.driver = target.cpufreq.get_driver(i) if target.has('cpuidle'): cpu.cpuidle.driver = target.cpuidle.get_driver() cpu.cpuidle.governor = target.cpuidle.get_governor() for state in target.cpuidle.get_states(i): state_info = IdleStateInfo() state_info.name = state.name state_info.desc = state.desc state_info.power = state.power state_info.latency = state.latency cpu.cpuidle.states.append(state_info) info.cpus.append(cpu) info.page_size_kb = target.page_size_kb if isinstance(target, AndroidTarget): info.screen_resolution = target.screen_resolution info.prop = target.getprop() info.android_id = target.android_id return info def read_target_info_cache(): if not os.path.exists(settings.cache_directory): os.makedirs(settings.cache_directory) if not os.path.isfile(settings.target_info_cache_file): return {} return read_pod(settings.target_info_cache_file) def write_target_info_cache(cache): if not os.path.exists(settings.cache_directory): os.makedirs(settings.cache_directory) with atomic_write_path(settings.target_info_cache_file) as at_path: write_pod(cache, at_path) def get_target_info_from_cache(system_id, cache=None): if cache is None: cache = read_target_info_cache() pod = cache.get(system_id, None) if not pod: return None 
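    # Illustrative cache shape (hypothetical system_id): the cache maps a
    # target's system_id to its serialized TargetInfo pod, e.g.
    #     {'02c4bc1a-...': {'target': 'AndroidTarget', 'abi': 'arm64',
    #                       '_pod_version': 5, ...}}
    # The version check below rejects pods written by an incompatible WA
    # version instead of silently mis-parsing them.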
_pod_version = pod.get('_pod_version', 0) if _pod_version != TargetInfo._pod_serialization_version: msg = 'Target info version mismatch. Expected {}, but found {}.\nTry deleting {}' raise ConfigError(msg.format(TargetInfo._pod_serialization_version, _pod_version, settings.target_info_cache_file)) return TargetInfo.from_pod(pod) def cache_target_info(target_info, overwrite=False, cache=None): if cache is None: cache = read_target_info_cache() if target_info.system_id in cache and not overwrite: raise ValueError('TargetInfo for {} is already in cache.'.format(target_info.system_id)) cache[target_info.system_id] = target_info.to_pod() write_target_info_cache(cache) class TargetInfo(Podable): _pod_serialization_version = 5 @staticmethod def from_pod(pod): instance = super(TargetInfo, TargetInfo).from_pod(pod) instance.target = pod['target'] instance.modules = pod['modules'] instance.abi = pod['abi'] instance.cpus = [CpuInfo.from_pod(c) for c in pod['cpus']] instance.os = pod['os'] instance.os_version = pod['os_version'] instance.system_id = pod['system_id'] instance.hostid = pod['hostid'] instance.hostname = pod['hostname'] instance.abi = pod['abi'] instance.is_rooted = pod['is_rooted'] instance.kernel_version = kernel_version_from_pod(pod) instance.kernel_config = kernel_config_from_pod(pod) instance.sched_features = pod['sched_features'] instance.page_size_kb = pod.get('page_size_kb') if instance.os == 'android': instance.screen_resolution = pod['screen_resolution'] instance.prop = AndroidProperties('') instance.prop._properties = pod['prop'] instance.android_id = pod['android_id'] return instance def __init__(self): super(TargetInfo, self).__init__() self.target = None self.modules = [] self.cpus = [] self.os = None self.os_version = None self.system_id = None self.hostid = None self.hostname = None self.abi = None self.is_rooted = None self.kernel_version = None self.kernel_config = None self.sched_features = None self.screen_resolution = None self.prop = None self.android_id = None self.page_size_kb = None def to_pod(self): pod = super(TargetInfo, self).to_pod() pod['target'] = self.target pod['modules'] = self.modules pod['abi'] = self.abi pod['cpus'] = [c.to_pod() for c in self.cpus] pod['os'] = self.os pod['os_version'] = self.os_version pod['system_id'] = self.system_id pod['hostid'] = self.hostid pod['hostname'] = self.hostname pod['abi'] = self.abi pod['is_rooted'] = self.is_rooted pod['kernel_release'] = self.kernel_version.release pod['kernel_version'] = self.kernel_version.version pod['kernel_config'] = dict(self.kernel_config.iteritems()) pod['sched_features'] = self.sched_features pod['page_size_kb'] = self.page_size_kb if self.os == 'android': pod['screen_resolution'] = self.screen_resolution pod['prop'] = self.prop._properties pod['android_id'] = self.android_id return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) pod['cpus'] = pod.get('cpus', []) pod['system_id'] = pod.get('system_id') pod['hostid'] = pod.get('hostid') pod['hostname'] = pod.get('hostname') pod['sched_features'] = pod.get('sched_features') pod['screen_resolution'] = pod.get('screen_resolution', (0, 0)) pod['prop'] = pod.get('prop') pod['android_id'] = pod.get('android_id') return pod @staticmethod def _pod_upgrade_v2(pod): pod['page_size_kb'] = pod.get('page_size_kb') pod['_pod_version'] = pod.get('format_version', 0) return pod @staticmethod def _pod_upgrade_v3(pod): config = {} for key, value in pod['kernel_config'].items(): config[key.upper()] = value 
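        # e.g. an old pod's {'config_smp': 'y'} entry becomes
        # {'CONFIG_SMP': 'y'} here (illustrative key), matching the
        # upper-case option names kernel configs use.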
pod['kernel_config'] = config return pod @staticmethod def _pod_upgrade_v4(pod): return TargetInfo._pod_upgrade_v3(pod) @staticmethod def _pod_upgrade_v5(pod): pod['modules'] = pod.get('modules') or [] return pod ================================================ FILE: wa/framework/target/manager.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging from devlib import Gem5SimulationPlatform from devlib.utils.misc import memoized from wa.framework import signal from wa.framework.exception import ExecutionError, TargetError, TargetNotRespondingError from wa.framework.plugin import Parameter from wa.framework.target.descriptor import (get_target_description, instantiate_target, instantiate_assistant) from wa.framework.target.info import (get_target_info, get_target_info_from_cache, cache_target_info, read_target_info_cache) from wa.framework.target.runtime_parameter_manager import RuntimeParameterManager from wa.utils.types import module_name_set class TargetManager(object): """ Instantiate the required target and perform configuration and validation of the device. """ parameters = [ Parameter('disconnect', kind=bool, default=False, description=""" Specifies whether the target should be disconnected from at the end of the run. """), ] def __init__(self, name, parameters, outdir): self.outdir = outdir self.logger = logging.getLogger('tm') self.target_name = name self.target = None self.assistant = None self.platform_name = None self.is_responsive = None self.rpm = None self.parameters = parameters self.disconnect = parameters.get('disconnect') def initialize(self): self._init_target() self.assistant.initialize() # If target supports hotplugging, online all cpus before perform discovery # and restore original configuration after completed. if self.target.has('hotplug'): online_cpus = self.target.list_online_cpus() try: self.target.hotplug.online_all() except TargetError: msg = 'Failed to online all CPUS - some information may not be '\ 'able to be retrieved.' 
self.logger.debug(msg) self.rpm = RuntimeParameterManager(self.target) all_cpus = set(range(self.target.number_of_cpus)) self.target.hotplug.offline(*all_cpus.difference(online_cpus)) else: self.rpm = RuntimeParameterManager(self.target) def finalize(self): if not self.target: return if self.assistant: self.assistant.finalize() if self.disconnect or isinstance(self.target.platform, Gem5SimulationPlatform): self.logger.info('Disconnecting from the device') with signal.wrap('TARGET_DISCONNECT'): self.target.disconnect() def start(self): self.assistant.start() def stop(self): self.assistant.stop() def extract_results(self, context): self.assistant.extract_results(context) @memoized def get_target_info(self): cache = read_target_info_cache() info = get_target_info_from_cache(self.target.system_id, cache=cache) if info is None: info = get_target_info(self.target) cache_target_info(info, cache=cache) else: # If module configuration has changed form when the target info # was previously cached, it is possible additional info will be # available, so should re-generate the cache. if module_name_set(info.modules) != module_name_set(self.target.modules): info = get_target_info(self.target) cache_target_info(info, overwrite=True, cache=cache) return info def reboot(self, context, hard=False): with signal.wrap('REBOOT', self, context): self.target.reboot(hard) def merge_runtime_parameters(self, parameters): return self.rpm.merge_runtime_parameters(parameters) def validate_runtime_parameters(self, parameters): self.rpm.validate_runtime_parameters(parameters) def commit_runtime_parameters(self, parameters): self.rpm.commit_runtime_parameters(parameters) def verify_target_responsive(self, context): can_reboot = context.reboot_policy.can_reboot if not self.target.check_responsive(explode=False): self.is_responsive = False if not can_reboot: raise TargetNotRespondingError('Target unresponsive and is not allowed to reboot.') elif self.target.has('hard_reset'): self.logger.info('Target unresponsive; performing hard reset') self.reboot(context, hard=True) self.is_responsive = True raise ExecutionError('Target became unresponsive but was recovered.') else: raise TargetNotRespondingError('Target unresponsive and hard reset not supported; bailing.') def _init_target(self): tdesc = get_target_description(self.target_name) extra_plat_params = {} if tdesc.platform is Gem5SimulationPlatform: extra_plat_params['host_output_dir'] = self.outdir self.logger.debug('Creating {} target'.format(self.target_name)) self.target = instantiate_target(tdesc, self.parameters, connect=False, extra_platform_params=extra_plat_params) self.is_responsive = True with signal.wrap('TARGET_CONNECT'): self.target.connect() self.logger.info('Setting up target') self.target.setup() self.assistant = instantiate_assistant(tdesc, self.parameters, self.target) ================================================ FILE: wa/framework/target/runtime_config.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # import logging import time from collections import defaultdict, OrderedDict from copy import copy from devlib.exception import TargetError from devlib.utils.misc import unique from devlib.utils.types import integer from wa.framework.exception import ConfigError from wa.framework.plugin import Plugin, Parameter from wa.utils.misc import resolve_cpus, resolve_unique_domain_cpus from wa.utils.types import caseless_string, enum logger = logging.getLogger('RuntimeConfig') class RuntimeParameter(Parameter): def __init__(self, name, setter, setter_params=None, **kwargs): super(RuntimeParameter, self).__init__(name, **kwargs) self.setter = setter self.setter_params = setter_params or {} def set(self, obj, value): self.validate_value(self.name, value) self.setter(obj, value, **self.setter_params) class RuntimeConfig(Plugin): name = None kind = 'runtime-config' @property def supported_parameters(self): return list(self._runtime_params.values()) @property def core_names(self): return unique(self.target.core_names) def __init__(self, target, **kwargs): super(RuntimeConfig, self).__init__(**kwargs) self.target = target self._target_checked = False self._runtime_params = {} try: self.initialize() except TargetError: msg = 'Failed to initialize: "{}"' self.logger.debug(msg.format(self.name)) self._runtime_params = {} def initialize(self): raise NotImplementedError() def commit(self): raise NotImplementedError() def set_runtime_parameter(self, name, value): if not self._target_checked: self.check_target() self._target_checked = True self._runtime_params[name].set(self, value) def set_defaults(self): for p in self.supported_parameters: if p.default: self.set_runtime_parameter(p.name, p.default) def validate_parameters(self): raise NotImplementedError() def check_target(self): raise NotImplementedError() def clear(self): raise NotImplementedError() class HotplugRuntimeConfig(RuntimeConfig): ''' NOTE: Currently will fail if trying to hotplug back a core that was hotplugged out when the devlib target was created. ''' name = 'rt-hotplug' @staticmethod def set_num_cores(obj, value, core): cpus = resolve_cpus(core, obj.target) max_cores = len(cpus) value = integer(value) if value > max_cores: msg = 'Cannot set number of {}\'s to {}; max is {}' raise ValueError(msg.format(core, value, max_cores)) msg = 'CPU{} Hotplugging already configured' # Set cpus to be enabled for cpu in cpus[:value]: if cpu in obj.num_cores: raise ConfigError(msg.format(cpu)) obj.num_cores[cpu] = True # Set the remaining cpus to be disabled. 
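        # A minimal illustration (hypothetical cpu numbering): with
        # core='big' resolving to cpus [4, 5, 6, 7] and value=2, the loop
        # above marks cpus 4 and 5 to be onlined, and the loop below marks
        # the remaining cpus 6 and 7 to be offlined.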
for cpu in cpus[value:]: if cpu in obj.num_cores: raise ConfigError(msg.format(cpu)) obj.num_cores[cpu] = False def __init__(self, target): self.num_cores = defaultdict(dict) super(HotplugRuntimeConfig, self).__init__(target) def initialize(self): if not self.target.has('hotplug'): return param_name = 'num_cores' self._runtime_params[param_name] = \ RuntimeParameter(param_name, kind=int, constraint=lambda x: 0 <= x <= self.target.number_of_cpus, description=""" The number of cpu cores to be online """, setter=self.set_num_cores, setter_params={'core': None}) for name in unique(self.target.platform.core_names): param_name = 'num_{}_cores'.format(name) self._runtime_params[param_name] = \ RuntimeParameter(param_name, kind=int, constraint=lambda x, name=name: 0 <= x <= len(self.target.core_cpus(name)), description=""" The number of {} cores to be online """.format(name), setter=self.set_num_cores, setter_params={'core': name}) for cpu_no in range(self.target.number_of_cpus): param_name = 'cpu{}_online'.format(cpu_no) self._runtime_params[param_name] = \ RuntimeParameter(param_name, kind=bool, description=""" Specify whether cpu{} should be online """.format(cpu_no), setter=self.set_num_cores, setter_params={'core': cpu_no}) if self.target.has('bl'): for cluster in ['big', 'little']: param_name = 'num_{}_cores'.format(cluster) self._runtime_params[param_name] = \ RuntimeParameter(param_name, kind=int, constraint=lambda x, c=cluster: 0 <= x <= len(resolve_cpus(c, self.target)), description=""" The number of cores on the {} cluster to be online """.format(cluster), setter=self.set_num_cores, setter_params={'core': cluster}) def check_target(self): if not self.target.has('hotplug'): raise TargetError('Target does not appear to support hotplug') def validate_parameters(self): if self.num_cores and len(self.num_cores) == self.target.number_of_cpus: if all(v is False for v in list(self.num_cores.values())): raise ValueError('Cannot set number of all cores to 0') def commit(self): '''Online all CPUs required in order before then off-lining''' num_cores = sorted(self.num_cores.items()) for cpu, online in num_cores: if online: self.target.hotplug.online(cpu) for cpu, online in reversed(num_cores): if not online: self.target.hotplug.offline(cpu) def clear(self): self.num_cores = defaultdict(dict) class SysfileValuesRuntimeConfig(RuntimeConfig): name = 'rt-sysfiles' # pylint: disable=unused-argument @staticmethod def set_sysfile(obj, values, core): for path, value in values.items(): verify = True if path.endswith('!'): verify = False path = path[:-1] if path in obj.sysfile_values: msg = 'Syspath "{}:{}" already specified with a value of "{}"' raise ConfigError(msg.format(path, value, obj.sysfile_values[path][0])) obj.sysfile_values[path] = (value, verify) def __init__(self, target): self.sysfile_values = OrderedDict() super(SysfileValuesRuntimeConfig, self).__init__(target) def initialize(self): self._runtime_params['sysfile_values'] = \ RuntimeParameter('sysfile_values', kind=dict, merge=True, setter=self.set_sysfile, setter_params={'core': None}, description=""" Sysfile path to be set """) def check_target(self): return True def validate_parameters(self): return def commit(self): for path, (value, verify) in self.sysfile_values.items(): self.target.write_value(path, value, verify=verify) def clear(self): self.sysfile_values = OrderedDict() def check_exists(self, path): if not self.target.file_exists(path): raise ConfigError('Sysfile "{}" does not exist.'.format(path)) class FreqValue(object): def 
__init__(self, values): if values is None: self.values = values else: self.values = sorted(values) def __call__(self, value): ''' `self.values` can be `None` if the device's supported values could not be retrieved for some reason e.g. the cluster was offline, in this case we assume the user values will be available and allow any integer values. ''' if self.values is None: if isinstance(value, int): return value else: msg = 'CPU frequency values could not be retrieved, cannot resolve "{}"' raise TargetError(msg.format(value)) elif isinstance(value, int) and value in self.values: return value elif isinstance(value, str): value = caseless_string(value) if value in ['min', 'max']: return value msg = 'Invalid frequency value: {}; Must be in {}' raise ValueError(msg.format(value, self.values)) def __str__(self): return 'valid frequency value: {}'.format(self.values) class CpufreqRuntimeConfig(RuntimeConfig): name = 'rt-cpufreq' @staticmethod def set_frequency(obj, value, core): obj.set_param(obj, value, core, 'frequency') @staticmethod def set_max_frequency(obj, value, core): obj.set_param(obj, value, core, 'max_frequency') @staticmethod def set_min_frequency(obj, value, core): obj.set_param(obj, value, core, 'min_frequency') @staticmethod def set_governor(obj, value, core): obj.set_param(obj, value, core, 'governor') @staticmethod def set_governor_tunables(obj, value, core): obj.set_param(obj, value, core, 'governor_tunables') @staticmethod def set_param(obj, value, core, parameter): '''Method to store passed parameter if it is not already specified for that cpu''' cpus = resolve_unique_domain_cpus(core, obj.target) for cpu in cpus: if parameter in obj.config[cpu]: msg = 'Cannot set "{}" for core "{}"; Parameter for CPU{} has already been set' raise ConfigError(msg.format(parameter, core, cpu)) obj.config[cpu][parameter] = value def __init__(self, target): self.config = defaultdict(dict) self.supported_cpu_freqs = {} self.supported_cpu_governors = {} super(CpufreqRuntimeConfig, self).__init__(target) def initialize(self): # pylint: disable=too-many-statements if not self.target.has('cpufreq'): return self._retrive_cpufreq_info() _, common_freqs, common_gov = self._get_common_values() # Add common parameters if available. 
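        # A sketch of the FreqValue contract defined above (illustrative
        # frequency table, values in kHz):
        #     fv = FreqValue([500000, 1000000, 1500000])
        #     fv(1000000)    # -> 1000000: supported frequency, accepted
        #     fv('max')      # -> 'max': symbolic, resolved at commit time
        #     fv(650000)     # raises ValueError: not in the supported set
        # The parameters registered below use instances like this as their
        # `kind`, so values are validated as soon as they are set.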
freq_val = FreqValue(common_freqs) param_name = 'frequency' self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=freq_val, setter=self.set_frequency, setter_params={'core': None}, description=""" The desired frequency for all cores """) param_name = 'max_frequency' self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=freq_val, setter=self.set_max_frequency, setter_params={'core': None}, description=""" The maximum frequency for all cores """) param_name = 'min_frequency' self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=freq_val, setter=self.set_min_frequency, setter_params={'core': None}, description=""" The minimum frequency for all cores """) if common_gov: param_name = 'governor' self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=str, allowed_values=common_gov, setter=self.set_governor, setter_params={'core': None}, description=""" The governor to be set for all cores """) param_name = 'gov_tunables' self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=dict, merge=True, setter=self.set_governor_tunables, setter_params={'core': None}, aliases=['governor_tunables'], description=""" The governor tunables to be set for all cores """) # Add core name parameters for name in unique(self.target.platform.core_names): cpu = resolve_unique_domain_cpus(name, self.target)[0] freq_val = FreqValue(self.supported_cpu_freqs.get(cpu)) avail_govs = self.supported_cpu_governors.get(cpu) param_name = '{}_frequency'.format(name) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=freq_val, setter=self.set_frequency, setter_params={'core': name}, description=""" The desired frequency for the {} cores """.format(name)) param_name = '{}_max_frequency'.format(name) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=freq_val, setter=self.set_max_frequency, setter_params={'core': name}, description=""" The maximum frequency for the {} cores """.format(name)) param_name = '{}_min_frequency'.format(name) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=freq_val, setter=self.set_min_frequency, setter_params={'core': name}, description=""" The minimum frequency for the {} cores """.format(name)) param_name = '{}_governor'.format(name) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=str, allowed_values=avail_govs, setter=self.set_governor, setter_params={'core': name}, description=""" The governor to be set for the {} cores """.format(name)) param_name = '{}_gov_tunables'.format(name) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=dict, setter=self.set_governor_tunables, setter_params={'core': name}, merge=True, description=""" The governor tunables to be set for the {} cores """.format(name)) # Add cpuX parameters. 
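        # Taken together, these blocks build a layered parameter namespace
        # (illustrative names, assuming a big.LITTLE target with little cpus
        # 0-3 and big cpus 4-7):
        #     frequency, governor, ...          - apply to all cores
        #     big_frequency, little_governor    - apply per core name/cluster
        #     cpu0_frequency, cpu4_governor     - apply to one cpu (added below)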
for cpu_no in range(self.target.number_of_cpus): freq_val = FreqValue(self.supported_cpu_freqs.get(cpu_no)) avail_govs = self.supported_cpu_governors.get(cpu_no) param_name = 'cpu{}_frequency'.format(cpu_no) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=freq_val, setter=self.set_frequency, setter_params={'core': cpu_no}, description=""" The desired frequency for cpu{} """.format(cpu_no)) param_name = 'cpu{}_max_frequency'.format(cpu_no) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=freq_val, setter=self.set_max_frequency, setter_params={'core': cpu_no}, description=""" The maximum frequency for cpu{} """.format(cpu_no)) param_name = 'cpu{}_min_frequency'.format(cpu_no) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=freq_val, setter=self.set_min_frequency, setter_params={'core': cpu_no}, description=""" The minimum frequency for cpu{} """.format(cpu_no)) param_name = 'cpu{}_governor'.format(cpu_no) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=str, allowed_values=avail_govs, setter=self.set_governor, setter_params={'core': cpu_no}, description=""" The governor to be set for cpu{} """.format(cpu_no)) param_name = 'cpu{}_gov_tunables'.format(cpu_no) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=dict, setter=self.set_governor_tunables, setter_params={'core': cpu_no}, merge=True, description=""" The governor tunables to be set for cpu{} """.format(cpu_no)) # Add big.little cores if present on device. if self.target.has('bl'): for cluster in ['big', 'little']: cpu = resolve_unique_domain_cpus(cluster, self.target)[0] freq_val = FreqValue(self.supported_cpu_freqs.get(cpu)) avail_govs = self.supported_cpu_governors.get(cpu) param_name = '{}_frequency'.format(cluster) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=freq_val, setter=self.set_frequency, setter_params={'core': cluster}, description=""" The desired frequency for the {} cluster """.format(cluster)) param_name = '{}_max_frequency'.format(cluster) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=freq_val, setter=self.set_max_frequency, setter_params={'core': cluster}, description=""" The maximum frequency for the {} cluster """.format(cluster)) param_name = '{}_min_frequency'.format(cluster) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=freq_val, setter=self.set_min_frequency, setter_params={'core': cluster}, description=""" The minimum frequency for the {} cluster """.format(cluster)) param_name = '{}_governor'.format(cluster) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=str, allowed_values=avail_govs, setter=self.set_governor, setter_params={'core': cluster}, description=""" The governor to be set for the {} cores """.format(cluster)) param_name = '{}_gov_tunables'.format(cluster) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=dict, setter=self.set_governor_tunables, setter_params={'core': cluster}, merge=True, description=""" The governor tunables to be set for the {} cores """.format(cluster)) def check_target(self): if not self.target.has('cpufreq'): raise TargetError('Target does not appear to support cpufreq') def validate_parameters(self): '''Method to validate parameters against each other''' for cpu in self.config: config = self.config[cpu] minf = config.get('min_frequency') maxf = config.get('max_frequency') freq = config.get('frequency') if freq and minf: msg = 'CPU{}: Can\'t 
set both cpu frequency and minimum frequency' raise ConfigError(msg.format(cpu)) if freq and maxf: msg = 'CPU{}: Can\'t set both cpu frequency and maximum frequency' raise ConfigError(msg.format(cpu)) if (maxf and minf) and self._resolve_freq(minf, cpu) > self._resolve_freq(maxf, cpu): msg = 'CPU{}: min_frequency "{}" cannot be greater than max_frequency "{}"' raise ConfigError(msg.format(cpu, minf, maxf)) def commit(self): for cpu in self.config: config = self.config[cpu] freq = self._resolve_freq(config.get('frequency'), cpu) minf = self._resolve_freq(config.get('min_frequency'), cpu) maxf = self._resolve_freq(config.get('max_frequency'), cpu) self.configure_governor(cpu, config.get('governor'), config.get('governor_tunables')) self.configure_frequency(cpu, freq, minf, maxf, config.get('governor')) def clear(self): self.config = defaultdict(dict) def configure_governor(self, cpu, governor=None, gov_tunables=None): if not governor and not gov_tunables: return if cpu not in self.target.list_online_cpus(): msg = 'Cannot configure governor for {} as no CPUs are online.' raise TargetError(msg.format(cpu)) if not governor: governor = self.target.get_governor(cpu) if not gov_tunables: gov_tunables = {} self.target.cpufreq.set_governor(cpu, governor, **gov_tunables) def configure_frequency(self, cpu, freq=None, min_freq=None, max_freq=None, governor=None): if freq and (min_freq or max_freq): msg = 'Cannot specify both frequency and min/max frequency' raise ConfigError(msg) if cpu not in self.target.list_online_cpus(): msg = 'Cannot configure frequencies for CPU{} as no CPUs are online.' raise TargetError(msg.format(cpu)) if freq: self._set_frequency(cpu, freq, governor) else: self._set_min_max_frequencies(cpu, min_freq, max_freq) def _resolve_freq(self, value, cpu): if value == 'min': value = self.target.cpufreq.get_min_available_frequency(cpu) elif value == 'max': value = self.target.cpufreq.get_max_available_frequency(cpu) return value def _set_frequency(self, cpu, freq, governor): if not governor: governor = self.target.cpufreq.get_governor(cpu) has_userspace = governor == 'userspace' # Sets all frequency to be to desired frequency if freq < self.target.cpufreq.get_frequency(cpu): self.target.cpufreq.set_min_frequency(cpu, freq) if has_userspace: self.target.cpufreq.set_frequency(cpu, freq) self.target.cpufreq.set_max_frequency(cpu, freq) else: self.target.cpufreq.set_max_frequency(cpu, freq) if has_userspace: self.target.cpufreq.set_frequency(cpu, freq) self.target.cpufreq.set_min_frequency(cpu, freq) def _set_min_max_frequencies(self, cpu, min_freq, max_freq): min_freq_set = False current_min_freq = self.target.cpufreq.get_min_frequency(cpu) current_max_freq = self.target.cpufreq.get_max_frequency(cpu) if max_freq: if max_freq < current_min_freq: if min_freq: self.target.cpufreq.set_min_frequency(cpu, min_freq) self.target.cpufreq.set_max_frequency(cpu, max_freq) min_freq_set = True else: msg = 'CPU {}: Cannot set max_frequency ({}) below current min frequency ({}).' raise ConfigError(msg.format(cpu, max_freq, current_min_freq)) else: self.target.cpufreq.set_max_frequency(cpu, max_freq) if min_freq and not min_freq_set: current_max_freq = max_freq or current_max_freq if min_freq > current_max_freq: msg = 'CPU {}: Cannot set min_frequency ({}) above current max frequency ({}).' raise ConfigError(msg.format(cpu, min_freq, current_max_freq)) self.target.cpufreq.set_min_frequency(cpu, min_freq) def _retrive_cpufreq_info(self): ''' Tries to retrieve cpu freq information for all cpus on device. 
        For each cpu domain, only one cpu is queried for information and the
        result is duplicated across related cpus. This is to reduce calls to
        the target; as long as one core per domain is online, the remaining
        cpus' information can still be populated.
        '''
        for cluster_cpu in resolve_unique_domain_cpus('all', self.target):
            domain_cpus = self.target.cpufreq.get_related_cpus(cluster_cpu)
            for cpu in domain_cpus:
                if cpu in self.target.list_online_cpus():
                    supported_cpu_freqs = self.target.cpufreq.list_frequencies(cpu)
                    supported_cpu_governors = self.target.cpufreq.list_governors(cpu)
                    break
            else:
                msg = 'CPUFreq information could not be retrieved for {}; '\
                      'will not be validated against device.'
                logger.debug(msg.format(', '.join('CPU{}'.format(cpu)
                                                  for cpu in domain_cpus)))
                return

            for cpu in domain_cpus:
                self.supported_cpu_freqs[cpu] = supported_cpu_freqs
                self.supported_cpu_governors[cpu] = supported_cpu_governors

    def _get_common_values(self):
        ''' Find common values for frequency and governors across all cores'''
        common_freqs = None
        common_gov = None
        all_freqs = None
        initialized = False
        for cpu in resolve_unique_domain_cpus('all', self.target):
            if not initialized:
                initialized = True
                common_freqs = set(self.supported_cpu_freqs.get(cpu) or [])
                all_freqs = copy(common_freqs)
                common_gov = set(self.supported_cpu_governors.get(cpu) or [])
            else:
                common_freqs = common_freqs.intersection(self.supported_cpu_freqs.get(cpu) or set())
                all_freqs = all_freqs.union(self.supported_cpu_freqs.get(cpu) or set())
                common_gov = common_gov.intersection(self.supported_cpu_governors.get(cpu) or set())
        return all_freqs, common_freqs, common_gov


class IdleStateValue(object):

    def __init__(self, values):
        if values is None:
            self.values = values
        else:
            self.values = [(value.id, value.name, value.desc) for value in values]

    def __call__(self, value):
        if self.values is None:
            return value

        if isinstance(value, str):
            value = caseless_string(value)
            if value == 'all':
                return [state[0] for state in self.values]
            elif value == 'none':
                return []
            else:
                return [self._get_state_ID(value)]
        elif isinstance(value, list):
            valid_states = []
            for state in value:
                valid_states.append(self._get_state_ID(state))
            return valid_states
        else:
            raise ValueError('Invalid IdleState: "{}"'.format(value))

    def _get_state_ID(self, value):
        '''Checks passed state and converts to its ID'''
        value = caseless_string(value)
        for s_id, s_name, s_desc in self.values:
            if value in (s_id, s_name, s_desc):
                return s_id
        msg = 'Invalid IdleState: "{}"; Must be in {}'
        raise ValueError(msg.format(value, self.values))

    def __str__(self):
        return 'valid idle state: "{}"'.format(self.values).replace('\'', '')


class CpuidleRuntimeConfig(RuntimeConfig):

    name = 'rt-cpuidle'

    @staticmethod
    def set_idle_state(obj, value, core):
        cpus = resolve_cpus(core, obj.target)
        for cpu in cpus:
            obj.config[cpu] = []
            for state in value:
                obj.config[cpu].append(state)

    def __init__(self, target):
        self.config = defaultdict(dict)
        self.supported_idle_states = {}
        super(CpuidleRuntimeConfig, self).__init__(target)

    def initialize(self):
        if not self.target.has('cpuidle'):
            return

        self._retrieve_device_idle_info()

        common_idle_states = self._get_common_idle_values()
        idle_state_val = IdleStateValue(common_idle_states)

        if common_idle_states:
            param_name = 'idle_states'
            self._runtime_params[param_name] = \
                RuntimeParameter(
                    param_name, kind=idle_state_val,
                    setter=self.set_idle_state,
                    setter_params={'core': None},
                    description="""
                    The idle states to be set for all cores
                    """)

        for name in unique(self.target.platform.core_names):
            cpu = resolve_cpus(name,
self.target)[0] idle_state_val = IdleStateValue(self.supported_idle_states.get(cpu)) param_name = '{}_idle_states'.format(name) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=idle_state_val, setter=self.set_idle_state, setter_params={'core': name}, description=""" The idle states to be set for {} cores """.format(name)) for cpu_no in range(self.target.number_of_cpus): idle_state_val = IdleStateValue(self.supported_idle_states.get(cpu_no)) param_name = 'cpu{}_idle_states'.format(cpu_no) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=idle_state_val, setter=self.set_idle_state, setter_params={'core': cpu_no}, description=""" The idle states to be set for cpu{} """.format(cpu_no)) if self.target.has('bl'): for cluster in ['big', 'little']: cpu = resolve_cpus(cluster, self.target)[0] idle_state_val = IdleStateValue(self.supported_idle_states.get(cpu)) param_name = '{}_idle_states'.format(cluster) self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=idle_state_val, setter=self.set_idle_state, setter_params={'core': cluster}, description=""" The idle states to be set for the {} cores """.format(cluster)) def check_target(self): if not self.target.has('cpuidle'): raise TargetError('Target does not appear to support cpuidle') def validate_parameters(self): return def clear(self): self.config = defaultdict(dict) def commit(self): for cpu in self.config: idle_states = set(state.id for state in self.supported_idle_states.get(cpu, [])) enabled = self.config[cpu] disabled = idle_states.difference(enabled) for state in enabled: self.target.cpuidle.enable(state, cpu) for state in disabled: self.target.cpuidle.disable(state, cpu) def _retrieve_device_idle_info(self): for cpu in range(self.target.number_of_cpus): self.supported_idle_states[cpu] = self.target.cpuidle.get_states(cpu) def _get_common_idle_values(self): '''Find common values for cpu idle states across all cores''' common_idle_states = [] for cpu in range(self.target.number_of_cpus): for state in self.supported_idle_states.get(cpu) or []: if state.name not in common_idle_states: common_idle_states.append(state) return common_idle_states ScreenOrientation = enum(['NATURAL', 'LEFT', 'INVERTED', 'RIGHT']) class AndroidRuntimeConfig(RuntimeConfig): name = 'rt-android' @staticmethod def set_brightness(obj, value): if value is not None: obj.config['brightness'] = value @staticmethod def set_airplane_mode(obj, value): if value is not None: obj.config['airplane_mode'] = value @staticmethod def set_rotation(obj, value): if value is not None: obj.config['rotation'] = value.value @staticmethod def set_screen_state(obj, value): if value is not None: obj.config['screen_on'] = value @staticmethod def set_unlock_screen(obj, value): if value is not None: obj.config['unlock_screen'] = value def __init__(self, target): self.config = defaultdict(dict) super(AndroidRuntimeConfig, self).__init__(target) def initialize(self): if self.target.os not in ['android', 'chromeos']: return if self.target.os == 'chromeos' and not self.target.supports_android: return param_name = 'brightness' self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=int, constraint=lambda x: 0 <= x <= 255, default=127, setter=self.set_brightness, description=""" Specify the screen brightness to be set for the device """) if self.target.os == 'android': param_name = 'airplane_mode' self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=bool, setter=self.set_airplane_mode, description=""" Specify 
whether airplane mode should be enabled for the device """) param_name = 'rotation' self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=ScreenOrientation, setter=self.set_rotation, description=""" Specify the screen orientation for the device """) param_name = 'screen_on' self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=bool, default=True, setter=self.set_screen_state, description=""" Specify whether the device screen should be on """) param_name = 'unlock_screen' self._runtime_params[param_name] = \ RuntimeParameter( param_name, kind=str, default=None, setter=self.set_unlock_screen, description=""" Specify how the device screen should be unlocked (e.g., vertical) """) def check_target(self): if self.target.os != 'android' and self.target.os != 'chromeos': raise ConfigError('Target does not appear to be running Android') if self.target.os == 'chromeos' and not self.target.supports_android: raise ConfigError('Target does not appear to support Android') def validate_parameters(self): pass def commit(self): # pylint: disable=too-many-branches if 'airplane_mode' in self.config: new_airplane_mode = self.config['airplane_mode'] old_airplane_mode = self.target.get_airplane_mode() self.target.set_airplane_mode(new_airplane_mode) # If we've just switched airplane mode off, wait a few seconds to # enable the network state to stabilise. That's helpful if we're # about to run a workload that is going to check for network # connectivity. if old_airplane_mode and not new_airplane_mode: self.logger.info('Disabled airplane mode, waiting up to 20 seconds for network setup') network_is_ready = False for _ in range(4): time.sleep(5) network_is_ready = self.target.is_network_connected() if network_is_ready: break if network_is_ready: self.logger.info("Found a network") else: self.logger.warning("Network unreachable") if 'brightness' in self.config: self.target.set_brightness(self.config['brightness']) if 'rotation' in self.config: self.target.set_rotation(self.config['rotation']) if 'screen_on' in self.config: if self.config['screen_on']: self.target.ensure_screen_is_on() else: self.target.ensure_screen_is_off() if self.config.get('unlock_screen'): self.target.ensure_screen_is_on() if self.target.is_screen_locked(): self.target.swipe_to_unlock(self.config['unlock_screen']) def clear(self): self.config = {} ================================================ FILE: wa/framework/target/runtime_parameter_manager.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# from collections import namedtuple from wa.framework.exception import ConfigError from wa.framework.target.runtime_config import (SysfileValuesRuntimeConfig, HotplugRuntimeConfig, CpufreqRuntimeConfig, CpuidleRuntimeConfig, AndroidRuntimeConfig) from wa.utils.types import obj_dict, caseless_string from wa.framework import pluginloader class RuntimeParameterManager(object): runtime_config_cls = [ # order matters SysfileValuesRuntimeConfig, HotplugRuntimeConfig, CpufreqRuntimeConfig, CpuidleRuntimeConfig, AndroidRuntimeConfig, ] def __init__(self, target): self.target = target self.runtime_params = {} try: for rt_cls in pluginloader.list_plugins(kind='runtime-config'): if rt_cls not in self.runtime_config_cls: self.runtime_config_cls.append(rt_cls) except ValueError: pass self.runtime_configs = [cls(self.target) for cls in self.runtime_config_cls] runtime_parameter = namedtuple('RuntimeParameter', 'cfg_point, rt_config') for cfg in self.runtime_configs: for param in cfg.supported_parameters: if param.name in self.runtime_params: msg = 'Duplicate runtime parameter name "{}": in both {} and {}' raise RuntimeError(msg.format(param.name, self.runtime_params[param.name].rt_config.name, cfg.name)) self.runtime_params[param.name] = runtime_parameter(param, cfg) # Uses corresponding config point to merge parameters def merge_runtime_parameters(self, parameters): merged_params = obj_dict() for source in parameters: for name, value in parameters[source].items(): cp = self.get_cfg_point(name) cp.set_value(merged_params, value) return dict(merged_params) # Validates runtime_parameters against each other def validate_runtime_parameters(self, parameters): self.clear_runtime_parameters() self.set_runtime_parameters(parameters) for cfg in self.runtime_configs: cfg.validate_parameters() # Writes the given parameters to the device. 
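    # A minimal end-to-end sketch (hypothetical parameter name), mirroring
    # how the execution framework drives this class:
    #     rpm = RuntimeParameterManager(target)
    #     merged = rpm.merge_runtime_parameters(
    #         {'workload': {'big_frequency': 'max'}})
    #     rpm.validate_runtime_parameters(merged)
    #     rpm.commit_runtime_parameters(merged)    # defined below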
def commit_runtime_parameters(self, parameters): self.clear_runtime_parameters() self.set_runtime_parameters(parameters) for cfg in self.runtime_configs: cfg.commit() # Stores a set of parameters performing isolated validation when appropriate def set_runtime_parameters(self, parameters): for name, value in parameters.items(): cfg = self.get_config_for_name(name) if cfg is None: msg = 'Unsupported runtime parameter: "{}"' raise ConfigError(msg.format(name)) cfg.set_runtime_parameter(name, value) def clear_runtime_parameters(self): for cfg in self.runtime_configs: cfg.clear() cfg.set_defaults() def get_config_for_name(self, name): name = caseless_string(name) for k, v in self.runtime_params.items(): if name == k: return v.rt_config return None def get_cfg_point(self, name): name = caseless_string(name) for k, v in self.runtime_params.items(): if name == k or name in v.cfg_point.aliases: return v.cfg_point raise ConfigError('Unknown runtime parameter: {}'.format(name)) ================================================ FILE: wa/framework/uiauto/app/build.gradle ================================================ apply plugin: 'com.android.library' android { compileSdkVersion 28 buildToolsVersion '28.0.3' defaultConfig { minSdkVersion 18 targetSdkVersion 28 testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner" } } dependencies { implementation fileTree(include: ['*.jar'], dir: 'libs') implementation 'com.android.support.test:runner:0.5' implementation 'com.android.support.test:rules:0.5' implementation 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2' } ================================================ FILE: wa/framework/uiauto/app/src/main/AndroidManifest.xml ================================================ ================================================ FILE: wa/framework/uiauto/app/src/main/java/com/arm/wa/uiauto/ActionLogger.java ================================================ /* Copyright 2014-2018 ARM Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.arm.wa.uiauto; import android.os.Bundle; import android.util.Log; /** * Basic marker API for workloads to generate start and end markers for * deliminating and timing actions. Markers are output to logcat with debug * priority. Actions represent a series of UI interactions to time. * * The marker API provides a way for instruments and output processors to hook into * per-action timings by parsing logcat logs produced per workload iteration. * * The marker output consists of a logcat tag 'UX_PERF' and a message. The * message consists of a name for the action and a timestamp. The timestamp * is separated by a single space from the name of the action. 
* * Typical usage: * * ActionLogger logger = ActionLogger("testTag", parameters); * logger.start(); * // actions to be recorded * logger.stop(); */ public class ActionLogger { private String testTag; private boolean enabled; public ActionLogger(String testTag, Bundle parameters) { this.testTag = testTag; this.enabled = parameters.getBoolean("markers_enabled"); } public void start() { if (enabled) { Log.d("UX_PERF", testTag + " start " + System.nanoTime()); } } public void stop() throws Exception { if (enabled) { Log.d("UX_PERF", testTag + " end " + System.nanoTime()); } } } ================================================ FILE: wa/framework/uiauto/app/src/main/java/com/arm/wa/uiauto/ApplaunchInterface.java ================================================ /* Copyright 2013-2017 ARM Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.arm.wa.uiauto; import android.os.Bundle; import android.support.test.uiautomator.UiObject; /** * ApplaunchInterface.java * Interface used for enabling uxperfapplaunch workload. * This interface gets implemented by all workloads that support application launch * instrumentation. */ public interface ApplaunchInterface { /** * Sets the launchEndObject of a workload, which is a UiObject that marks * the end of the application launch. */ public UiObject getLaunchEndObject(); /** * Runs the Uiautomation methods for clearing the initial run * dialogues on the first time installation of an application package. */ public void runApplicationSetup() throws Exception; /** * Provides the application launch command of the application which is * constructed as a string from the workload. */ public String getLaunchCommand(); /** Passes the workload parameters. */ public void setWorkloadParameters(Bundle parameters); /** Initialize the instrumentation for the workload */ public void initialize_instrumentation(); } ================================================ FILE: wa/framework/uiauto/app/src/main/java/com/arm/wa/uiauto/BaseUiAutomation.java ================================================ /* Copyright 2013-2018 ARM Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package com.arm.wa.uiauto; import android.os.Bundle; import android.os.SystemClock; import android.app.Instrumentation; import android.content.Context; import android.graphics.Point; import android.graphics.Rect; import android.support.test.InstrumentationRegistry; import android.support.test.uiautomator.UiDevice; import android.support.test.uiautomator.UiObject; import android.support.test.uiautomator.UiObjectNotFoundException; import android.support.test.uiautomator.UiSelector; import android.support.test.uiautomator.UiWatcher; import android.support.test.uiautomator.UiScrollable; import org.junit.Before; import org.junit.Test; import java.io.BufferedReader; import java.io.File; import java.io.InputStreamReader; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import static android.support.test.InstrumentationRegistry.getArguments; public class BaseUiAutomation { public enum FindByCriteria { BY_ID, BY_TEXT, BY_DESC }; public enum Direction { UP, DOWN, LEFT, RIGHT, NULL }; public enum ScreenOrientation { RIGHT, NATURAL, LEFT, PORTRAIT, LANDSCAPE }; public enum PinchType { IN, OUT, NULL }; // Time in milliseconds public long uiAutoTimeout = 4 * 1000; public static final int CLICK_REPEAT_INTERVAL_MINIMUM = 5; public static final int CLICK_REPEAT_INTERVAL_DEFAULT = 50; public Instrumentation mInstrumentation; public Context mContext; public UiDevice mDevice; @Before public void initialize_instrumentation() { mInstrumentation = InstrumentationRegistry.getInstrumentation(); mDevice = UiDevice.getInstance(mInstrumentation); mContext = mInstrumentation.getTargetContext(); } @Test public void setup() throws Exception { } @Test public void runWorkload() throws Exception { } @Test public void extractResults() throws Exception { } @Test public void teardown() throws Exception { } public void sleep(int second) { SystemClock.sleep(second * 1000); } // Generate a package ID public String getPackageID(Bundle parameters) { String packageName = parameters.getString("package_name"); return packageName + ":id/"; } public boolean takeScreenshot(String name) { Bundle params = getArguments(); String png_dir = params.getString("workdir"); try { return mDevice.takeScreenshot(new File(png_dir, name + ".png")); } catch (NoSuchMethodError e) { return true; } } public void waitText(String text) throws UiObjectNotFoundException { waitText(text, 600); } public void waitText(String text, int second) throws UiObjectNotFoundException { UiSelector selector = new UiSelector(); UiObject text_obj = mDevice.findObject(selector.text(text) .className("android.widget.TextView")); waitObject(text_obj, second); } public void waitObject(UiObject obj) throws UiObjectNotFoundException { waitObject(obj, 600); } public void waitObject(UiObject obj, int second) throws UiObjectNotFoundException { if (!obj.waitForExists(second * 1000)) { throw new UiObjectNotFoundException("UiObject is not found: " + obj.getSelector().toString()); } } public boolean waitUntilNoObject(UiObject obj, int second) { return obj.waitUntilGone(second * 1000); } public void clearLogcat() throws Exception { Runtime.getRuntime().exec("logcat -c"); } public void waitForLogcatText(String searchText, long timeout) throws Exception { long startTime = System.currentTimeMillis(); Process process = Runtime.getRuntime().exec("logcat"); BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; long currentTime = System.currentTimeMillis(); boolean found = false; while ((currentTime - 
startTime) < timeout) {
            sleep(2);  // poll every two seconds
            while ((line = reader.readLine()) != null) {
                if (line.contains(searchText)) {
                    found = true;
                    break;
                }
            }
            if (found) {
                break;
            }
            currentTime = System.currentTimeMillis();
        }
        process.destroy();
        if ((currentTime - startTime) >= timeout) {
            throw new TimeoutException(String.format("Timed out waiting for Logcat text \"%s\"",
                                                     searchText));
        }
    }

    public void registerWatcher(String name, UiWatcher watcher) {
        mDevice.registerWatcher(name, watcher);
    }

    public void runWatchers() {
        mDevice.runWatchers();
    }

    public void removeWatcher(String name) {
        mDevice.removeWatcher(name);
    }

    public void setScreenOrientation(ScreenOrientation orientation) throws Exception {
        int width = mDevice.getDisplayWidth();
        int height = mDevice.getDisplayHeight();
        switch (orientation) {
            case RIGHT:
                mDevice.setOrientationRight();
                break;
            case NATURAL:
                mDevice.setOrientationNatural();
                break;
            case LEFT:
                mDevice.setOrientationLeft();
                break;
            case LANDSCAPE:
                if (mDevice.isNaturalOrientation()) {
                    if (height > width) {
                        mDevice.setOrientationRight();
                    }
                } else {
                    if (height > width) {
                        mDevice.setOrientationNatural();
                    }
                }
                break;
            case PORTRAIT:
                if (mDevice.isNaturalOrientation()) {
                    if (height < width) {
                        mDevice.setOrientationRight();
                    }
                } else {
                    if (height < width) {
                        mDevice.setOrientationNatural();
                    }
                }
                break;
            default:
                throw new Exception("No orientation specified");
        }
    }

    public void unsetScreenOrientation() throws Exception {
        mDevice.unfreezeRotation();
    }

    public void uiObjectPerformLongClick(UiObject view, int steps) throws Exception {
        Rect rect = view.getBounds();
        mDevice.swipe(rect.centerX(), rect.centerY(),
                      rect.centerX(), rect.centerY(), steps);
    }

    public int getDisplayHeight() {
        return mDevice.getDisplayHeight();
    }

    public int getDisplayWidth() {
        return mDevice.getDisplayWidth();
    }

    public int getDisplayCentreWidth() {
        return getDisplayWidth() / 2;
    }

    public int getDisplayCentreHeight() {
        return getDisplayHeight() / 2;
    }

    public void tapDisplayCentre() {
        tapDisplay(getDisplayCentreWidth(), getDisplayCentreHeight());
    }

    public void tapDisplay(int x, int y) {
        mDevice.click(x, y);
    }

    public void pressEnter() {
        mDevice.pressEnter();
    }

    public void pressHome() {
        mDevice.pressHome();
    }

    public void pressBack() {
        mDevice.pressBack();
    }

    public void uiObjectSwipe(UiObject view, Direction direction, int steps) throws Exception {
        switch (direction) {
            case UP:
                view.swipeUp(steps);
                break;
            case DOWN:
                view.swipeDown(steps);
                break;
            case LEFT:
                view.swipeLeft(steps);
                break;
            case RIGHT:
                view.swipeRight(steps);
                break;
            case NULL:
                throw new Exception("No direction specified");
            default:
                break;
        }
    }

    public void uiDeviceSwipeVertical(int startY, int endY, int xCoordinate, int steps) {
        mDevice.swipe(xCoordinate, startY, xCoordinate, endY, steps);
    }

    public void uiDeviceSwipeHorizontal(int startX, int endX, int yCoordinate, int steps) {
        mDevice.swipe(startX, yCoordinate, endX, yCoordinate, steps);
    }

    public void uiObjectVertPinchIn(UiObject view, int steps, int percent) throws Exception {
        final int FINGER_TOUCH_HALF_WIDTH = 20;

        // Make value between 1 and 100
        int nPercent = (percent < 0) ? 1 : (percent > 100) ?
100 : percent; float percentage = nPercent / 100f; Rect rect = view.getVisibleBounds(); if (rect.width() <= FINGER_TOUCH_HALF_WIDTH * 2) { throw new IllegalStateException("Object width is too small for operation"); } // Start at the top-center and bottom-center of the control Point startPoint1 = new Point(rect.centerX(), rect.centerY() + (int) ((rect.height() / 2) * percentage)); Point startPoint2 = new Point(rect.centerX(), rect.centerY() - (int) ((rect.height() / 2) * percentage)); // End at the same point at the center of the control Point endPoint1 = new Point(rect.centerX(), rect.centerY() + FINGER_TOUCH_HALF_WIDTH); Point endPoint2 = new Point(rect.centerX(), rect.centerY() - FINGER_TOUCH_HALF_WIDTH); view.performTwoPointerGesture(startPoint1, startPoint2, endPoint1, endPoint2, steps); } public void uiObjectVertPinchOut(UiObject view, int steps, int percent) throws Exception { final int FINGER_TOUCH_HALF_WIDTH = 20; // Make value between 1 and 100 int nPercent = (percent < 0) ? 1 : (percent > 100) ? 100 : percent; float percentage = nPercent / 100f; Rect rect = view.getVisibleBounds(); if (rect.width() <= FINGER_TOUCH_HALF_WIDTH * 2) { throw new IllegalStateException("Object width is too small for operation"); } // Start from the same point at the center of the control Point startPoint1 = new Point(rect.centerX(), rect.centerY() + FINGER_TOUCH_HALF_WIDTH); Point startPoint2 = new Point(rect.centerX(), rect.centerY() - FINGER_TOUCH_HALF_WIDTH); // End at the top-center and bottom-center of the control Point endPoint1 = new Point(rect.centerX(), rect.centerY() + (int) ((rect.height() / 2) * percentage)); Point endPoint2 = new Point(rect.centerX(), rect.centerY() - (int) ((rect.height() / 2) * percentage)); view.performTwoPointerGesture(startPoint1, startPoint2, endPoint1, endPoint2, steps); } public void uiObjectVertPinch(UiObject view, PinchType direction, int steps, int percent) throws Exception { if (direction.equals(PinchType.IN)) { uiObjectVertPinchIn(view, steps, percent); } else if (direction.equals(PinchType.OUT)) { uiObjectVertPinchOut(view, steps, percent); } } public void uiDeviceSwipeUp(int steps) { mDevice.swipe( getDisplayCentreWidth(), (getDisplayCentreHeight() + (getDisplayCentreHeight() / 2)), getDisplayCentreWidth(), (getDisplayCentreHeight() / 2), steps); } public void uiDeviceSwipeDown(int steps) { mDevice.swipe( getDisplayCentreWidth(), (getDisplayCentreHeight() / 2), getDisplayCentreWidth(), (getDisplayCentreHeight() + (getDisplayCentreHeight() / 2)), steps); } public void uiDeviceSwipeLeft(int steps) { mDevice.swipe( (getDisplayCentreWidth() + (getDisplayCentreWidth() / 2)), getDisplayCentreHeight(), (getDisplayCentreWidth() / 2), getDisplayCentreHeight(), steps); } public void uiDeviceSwipeRight(int steps) { mDevice.swipe( (getDisplayCentreWidth() / 2), getDisplayCentreHeight(), (getDisplayCentreWidth() + (getDisplayCentreWidth() / 2)), getDisplayCentreHeight(), steps); } public void uiDeviceSwipe(Direction direction, int steps) throws Exception { switch (direction) { case UP: uiDeviceSwipeUp(steps); break; case DOWN: uiDeviceSwipeDown(steps); break; case LEFT: uiDeviceSwipeLeft(steps); break; case RIGHT: uiDeviceSwipeRight(steps); break; case NULL: throw new Exception("No direction specified"); default: break; } } public void repeatClickUiObject(UiObject view, int repeatCount, int intervalInMillis) throws Exception { int repeatInterval = intervalInMillis > CLICK_REPEAT_INTERVAL_MINIMUM ? 
intervalInMillis : CLICK_REPEAT_INTERVAL_DEFAULT; if (repeatCount < 1 || !view.isClickable()) { return; } for (int i = 0; i < repeatCount; ++i) { view.click(); SystemClock.sleep(repeatInterval); // in order to register as separate click } } public UiObject clickUiObject(FindByCriteria criteria, String matching) throws Exception { return clickUiObject(criteria, matching, null, false); } public UiObject clickUiObject(FindByCriteria criteria, String matching, boolean wait) throws Exception { return clickUiObject(criteria, matching, null, wait); } public UiObject clickUiObject(FindByCriteria criteria, String matching, String clazz) throws Exception { return clickUiObject(criteria, matching, clazz, false); } public UiObject clickUiObject(FindByCriteria criteria, String matching, String clazz, boolean wait) throws Exception { UiObject view; switch (criteria) { case BY_ID: view = (clazz == null) ? getUiObjectByResourceId(matching) : getUiObjectByResourceId(matching, clazz); break; case BY_DESC: view = (clazz == null) ? getUiObjectByDescription(matching) : getUiObjectByDescription(matching, clazz); break; case BY_TEXT: default: view = (clazz == null) ? getUiObjectByText(matching) : getUiObjectByText(matching, clazz); break; } if (wait) { view.clickAndWaitForNewWindow(); } else { view.click(); } return view; } public UiObject getUiObjectByResourceId(String resourceId, String className) throws Exception { return getUiObjectByResourceId(resourceId, className, uiAutoTimeout); } public UiObject getUiObjectByResourceId(String resourceId, String className, long timeout) throws Exception { UiObject object = mDevice.findObject(new UiSelector().resourceId(resourceId) .className(className)); if (!object.waitForExists(timeout)) { throw new UiObjectNotFoundException(String.format("Could not find \"%s\" \"%s\"", resourceId, className)); } return object; } public UiObject getUiObjectByResourceId(String id) throws Exception { UiObject object = mDevice.findObject(new UiSelector().resourceId(id)); if (!object.waitForExists(uiAutoTimeout)) { throw new UiObjectNotFoundException("Could not find view with resource ID: " + id); } return object; } public UiObject getUiObjectByDescription(String description, String className) throws Exception { return getUiObjectByDescription(description, className, uiAutoTimeout); } public UiObject getUiObjectByDescription(String description, String className, long timeout) throws Exception { UiObject object = mDevice.findObject(new UiSelector().descriptionContains(description) .className(className)); if (!object.waitForExists(timeout)) { throw new UiObjectNotFoundException(String.format("Could not find \"%s\" \"%s\"", description, className)); } return object; } public UiObject getUiObjectByDescription(String desc) throws Exception { UiObject object = mDevice.findObject(new UiSelector().descriptionContains(desc)); if (!object.waitForExists(uiAutoTimeout)) { throw new UiObjectNotFoundException("Could not find view with description: " + desc); } return object; } public UiObject getUiObjectByText(String text, String className) throws Exception { return getUiObjectByText(text, className, uiAutoTimeout); } public UiObject getUiObjectByText(String text, String className, long timeout) throws Exception { UiObject object = mDevice.findObject(new UiSelector().textContains(text) .className(className)); if (!object.waitForExists(timeout)) { throw new UiObjectNotFoundException(String.format("Could not find \"%s\" \"%s\"", text, className)); } return object; } public UiObject 
getUiObjectByText(String text) throws Exception {
        UiObject object = mDevice.findObject(new UiSelector().textContains(text));
        if (!object.waitForExists(uiAutoTimeout)) {
            throw new UiObjectNotFoundException("Could not find view with text: " + text);
        }
        return object;
    }

    // Helper to select a folder in the gallery
    public void selectGalleryFolder(String directory) throws Exception {
        UiObject workdir = mDevice.findObject(new UiSelector().text(directory)
                                   .className("android.widget.TextView"));
        UiScrollable scrollView = new UiScrollable(new UiSelector().scrollable(true));

        // If the folder is not present wait for a short time for
        // the media server to refresh its index.
        boolean discovered = workdir.waitForExists(TimeUnit.SECONDS.toMillis(10));
        if (!discovered && scrollView.exists()) {
            // First check if the directory is visible on the first
            // screen and if not scroll to the bottom of the screen to look for it.
            discovered = scrollView.scrollIntoView(workdir);

            // If still not discovered scroll back to the top of the screen and
            // wait for a longer amount of time for the media server to refresh
            // its index.
            if (!discovered) {
                // scrollView.scrollToBeginning() doesn't work for this
                // particular scrollable view so use the device method instead
                for (int i = 0; i < 10; i++) {
                    uiDeviceSwipeUp(20);
                }
                discovered = workdir.waitForExists(TimeUnit.SECONDS.toMillis(60));

                // Scroll to the bottom of the screen one last time
                if (!discovered) {
                    discovered = scrollView.scrollIntoView(workdir);
                }
            }
        }

        if (discovered) {
            workdir.clickAndWaitForNewWindow();
        } else {
            throw new UiObjectNotFoundException("Could not find folder: " + directory);
        }
    }

    // If an app is not designed for running on the latest version of Android
    // (currently Q) an additional screen can pop up asking to confirm permissions.
    public void dismissAndroidPermissionPopup() throws Exception {
        UiObject permissionAccess = mDevice.findObject(new UiSelector().textMatches(
                ".*Choose what to allow .* to access"));
        UiObject continueButton = mDevice.findObject(new UiSelector().resourceId("com.android.permissioncontroller:id/continue_button")
                                          .textContains("Continue"));
        if (permissionAccess.exists() && continueButton.exists()) {
            continueButton.click();
        }
    }

    // If an app is not designed for running on the latest version of Android
    // (currently Q) dismiss the warning popup if present.
    public void dismissAndroidVersionPopup() throws Exception {

        // Ensure we have dismissed any permission screens before looking for the version popup
        dismissAndroidPermissionPopup();

        UiObject warningText = mDevice.findObject(new UiSelector().textContains(
                "This app was built for an older version of Android"));
        UiObject acceptButton = mDevice.findObject(new UiSelector().resourceId("android:id/button1")
                                        .className("android.widget.Button"));
        if (warningText.exists() && acceptButton.exists()) {
            acceptButton.click();
        }
    }

    // If Chrome is a fresh install then these popups may be presented;
    // dismiss them if visible.
    public void dismissChromePopup() throws Exception {
        UiObject accept = mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/terms_accept")
                                   .className("android.widget.Button"));
        if (accept.waitForExists(3000)) {
            accept.click();
            UiObject negative = mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/negative_button")
                                         .className("android.widget.Button"));
            if (negative.waitForExists(10000)) {
                negative.click();
            }
        }
        UiObject lite = mDevice.findObject(new UiSelector().resourceId("com.android.chrome:id/button_secondary")
                                 .className("android.widget.Button"));
        if (lite.exists()) {
            lite.click();
        }
    }

    // Override the getParams function to decode a URL-encoded parameter bundle before
    // passing it to workloads.
    public Bundle getParams() {
        // Get the original parameter bundle
        Bundle parameters = getArguments();

        // Decode each parameter in the bundle, except null values and "class", as this
        // is used to control instrumentation and is therefore not encoded.
        for (String key : parameters.keySet()) {
            String param = parameters.getString(key);
            if (param != null && !key.equals("class")) {
                param = android.net.Uri.decode(param);
                parameters = decode(parameters, key, param);
            }
        }
        return parameters;
    }

    // Helper function to decode a string and insert it as an appropriate type
    // into a provided bundle with its key.
    // Each bundle parameter will be a URL-encoded string with 2 characters prefixed to the value
    // used to store the original type information, e.g. 'fl' -> list of floats.
    private Bundle decode(Bundle parameters, String key, String value) {
        char value_type = value.charAt(0);
        char value_dimension = value.charAt(1);
        String param = value.substring(2);

        if (value_dimension == 's') {
            if (value_type == 's') {
                parameters.putString(key, param);
            } else if (value_type == 'f') {
                parameters.putFloat(key, Float.parseFloat(param));
            } else if (value_type == 'd') {
                parameters.putDouble(key, Double.parseDouble(param));
            } else if (value_type == 'b') {
                parameters.putBoolean(key, Boolean.parseBoolean(param));
            } else if (value_type == 'i') {
                parameters.putInt(key, Integer.parseInt(param));
            } else if (value_type == 'n') {
                parameters.putString(key, "None");
            } else {
                throw new IllegalArgumentException("Error decoding: " + key + value
                                                   + " - unknown format");
            }
        } else if (value_dimension == 'l') {
            return decodeArray(parameters, key, value_type, param);
        } else {
            throw new IllegalArgumentException("Error decoding: " + key + value
                                               + " - unknown format");
        }
        return parameters;
    }

    // Helper function to deal with decoding arrays and update the bundle with
    // an appropriate array type. The string "0newelement0" is used to distinguish
    // each element from each other in the array when encoded.
private Bundle decodeArray(Bundle parameters, String key, char type, String value) { String[] string_list = value.split("0newelement0"); if (type == 's') { parameters.putStringArray(key, string_list); } else if (type == 'i') { int[] int_list = new int[string_list.length]; for (int i = 0; i < string_list.length; i++){ int_list[i] = Integer.parseInt(string_list[i]); } parameters.putIntArray(key, int_list); } else if (type == 'f') { float[] float_list = new float[string_list.length]; for (int i = 0; i < string_list.length; i++){ float_list[i] = Float.parseFloat(string_list[i]); } parameters.putFloatArray(key, float_list); } else if (type == 'd') { double[] double_list = new double[string_list.length]; for (int i = 0; i < string_list.length; i++){ double_list[i] = Double.parseDouble(string_list[i]); } parameters.putDoubleArray(key, double_list); } else if (type == 'b') { boolean[] boolean_list = new boolean[string_list.length]; for (int i = 0; i < string_list.length; i++){ boolean_list[i] = Boolean.parseBoolean(string_list[i]); } parameters.putBooleanArray(key, boolean_list); } else { throw new IllegalArgumentException("Error decoding array: " + value + " - unknown format"); } return parameters; } } ================================================ FILE: wa/framework/uiauto/app/src/main/java/com/arm/wa/uiauto/UiAutoUtils.java ================================================ /* Copyright 2013-2017 ARM Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.arm.wa.uiauto; import android.os.Bundle; public final class UiAutoUtils { /** Construct launch command of an application. */ public static String createLaunchCommand(Bundle parameters) { String launchCommand; String activityName = parameters.getString("launch_activity"); String packageName = parameters.getString("package_name"); if (activityName.equals("None")) { launchCommand = String.format("am start --user -3 %s", packageName); } else { launchCommand = String.format("am start --user -3 -n %s/%s", packageName, activityName); } return launchCommand; } } ================================================ FILE: wa/framework/uiauto/app/src/main/java/com/arm/wa/uiauto/UxPerfUiAutomation.java ================================================ /* Copyright 2013-2017 ARM Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
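------------------------------------------------
EXAMPLE: encoding side of the parameter scheme (illustrative sketch, not a repository file)
------------------------------------------------
decode()/decodeArray() above define the wire format WA uses to pass workload
parameters to the instrumentation: a type character ('s', 'i', 'f', 'd', 'b', or
'n' for None) plus a dimension character ('s' scalar, 'l' list), followed by the
value, with list elements joined by the sentinel "0newelement0" and the whole
string URL-encoded. The real encoder is ParameterDict in wa/utils/types.py (used
via iter_encoded_items() in wa/framework/workload.py); the sketch below
re-implements the scheme only as far as the decoder shown here requires, and the
choice of 'f' over 'd' for Python floats is an assumption.

from urllib.parse import quote

TYPE_CHARS = {bool: 'b', int: 'i', float: 'f', str: 's'}  # 'd' (double) not emitted here

def encode_param(value):
    """Return the URL-encoded, type-prefixed string that decode() expects."""
    if value is None:
        payload = 'ns' + 'None'
    elif isinstance(value, list):
        payload = TYPE_CHARS[type(value[0])] + 'l' + '0newelement0'.join(str(v) for v in value)
    else:
        payload = TYPE_CHARS[type(value)] + 's' + str(value)
    return quote(payload)

# encode_param(2.5)    -> 'fs2.5'
# encode_param([1, 2]) -> 'il10newelement02'
# encode_param(True)   -> 'bsTrue'  (Boolean.parseBoolean is case-insensitive)
------------------------------------------------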
*/

package com.arm.wa.uiauto;

import android.os.Bundle;

import java.util.logging.Logger;

import com.arm.wa.uiauto.BaseUiAutomation.Direction;
import com.arm.wa.uiauto.BaseUiAutomation.PinchType;

public class UxPerfUiAutomation {

    private Logger logger = Logger.getLogger(UxPerfUiAutomation.class.getName());

    public enum GestureType { UIDEVICE_SWIPE, UIOBJECT_SWIPE, PINCH };

    public static class GestureTestParams {

        public GestureType gestureType;
        public Direction gestureDirection;
        public PinchType pinchType;
        public int percent;
        public int steps;

        public GestureTestParams(GestureType gesture, Direction direction, int steps) {
            this.gestureType = gesture;
            this.gestureDirection = direction;
            this.pinchType = PinchType.NULL;
            this.steps = steps;
            this.percent = 0;
        }

        public GestureTestParams(GestureType gesture, PinchType pinchType, int steps, int percent) {
            this.gestureType = gesture;
            this.gestureDirection = Direction.NULL;
            this.pinchType = pinchType;
            this.steps = steps;
            this.percent = percent;
        }
    }
}

================================================
FILE: wa/framework/uiauto/build.gradle
================================================
// Top-level build file where you can add configuration options common to all sub-projects/modules.

buildscript {
    repositories {
        jcenter()
        google()
    }
    dependencies {
        classpath 'com.android.tools.build:gradle:7.2.1'

        // NOTE: Do not place your application dependencies here; they belong
        // in the individual module build.gradle files
    }
}

allprojects {
    repositories {
        jcenter()
        google()
    }
}

task clean(type: Delete) {
    delete rootProject.buildDir
}

================================================
FILE: wa/framework/uiauto/build.sh
================================================
#!/bin/bash
# Copyright 2013-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

set -e

# Ensure gradlew exists before starting
if [[ ! -f gradlew ]]; then
    echo 'gradlew file not found! Check that you are in the right directory.'
    exit 9
fi

# Build and return appropriate exit code if failed
./gradlew clean :app:assembleDebug
exit_code=$?
if [ $exit_code -ne 0 ]; then
    echo "ERROR: 'gradle build' exited with code $exit_code"
    exit $exit_code
fi

cp app/build/outputs/aar/app-debug.aar ./uiauto.aar

================================================
FILE: wa/framework/uiauto/gradle/wrapper/gradle-wrapper.properties
================================================
#Wed May 03 15:42:44 BST 2017
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip

================================================
FILE: wa/framework/uiauto/gradlew
================================================
#!/usr/bin/env bash

##############################################################################
##
##  Gradle start up script for UN*X
##
##############################################################################

# Add default JVM options here.
You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS="" APP_NAME="Gradle" APP_BASE_NAME=`basename "$0"` # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD="maximum" warn ( ) { echo "$*" } die ( ) { echo echo "$*" echo exit 1 } # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false case "`uname`" in CYGWIN* ) cygwin=true ;; Darwin* ) darwin=true ;; MINGW* ) msys=true ;; esac # Attempt to set APP_HOME # Resolve links: $0 may be a link PRG="$0" # Need this for relative symlinks. while [ -h "$PRG" ] ; do ls=`ls -ld "$PRG"` link=`expr "$ls" : '.*-> \(.*\)$'` if expr "$link" : '/.*' > /dev/null; then PRG="$link" else PRG=`dirname "$PRG"`"/$link" fi done SAVED="`pwd`" cd "`dirname \"$PRG\"`/" >/dev/null APP_HOME="`pwd -P`" cd "$SAVED" >/dev/null CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar # Determine the Java command to use to start the JVM. if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables JAVACMD="$JAVA_HOME/jre/sh/java" else JAVACMD="$JAVA_HOME/bin/java" fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else JAVACMD="java" which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi # Increase the maximum file descriptors if we can. if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then MAX_FD_LIMIT=`ulimit -H -n` if [ $? -eq 0 ] ; then if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then MAX_FD="$MAX_FD_LIMIT" fi ulimit -n $MAX_FD if [ $? 
-ne 0 ] ; then warn "Could not set maximum file descriptor limit: $MAX_FD" fi else warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" fi fi # For Darwin, add options to specify how the application appears in the dock if $darwin; then GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" fi # For Cygwin, switch paths to Windows format before running java if $cygwin ; then APP_HOME=`cygpath --path --mixed "$APP_HOME"` CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` JAVACMD=`cygpath --unix "$JAVACMD"` # We build the pattern for arguments to be converted via cygpath ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` SEP="" for dir in $ROOTDIRSRAW ; do ROOTDIRS="$ROOTDIRS$SEP$dir" SEP="|" done OURCYGPATTERN="(^($ROOTDIRS))" # Add a user-defined pattern to the cygpath arguments if [ "$GRADLE_CYGPATTERN" != "" ] ; then OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" fi # Now convert the arguments - kludge to limit ourselves to /bin/sh i=0 for arg in "$@" ; do CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` else eval `echo args$i`="\"$arg\"" fi i=$((i+1)) done case $i in (0) set -- ;; (1) set -- "$args0" ;; (2) set -- "$args0" "$args1" ;; (3) set -- "$args0" "$args1" "$args2" ;; (4) set -- "$args0" "$args1" "$args2" "$args3" ;; (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; esac fi # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules function splitJvmOpts() { JVM_OPTS=("$@") } eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" ================================================ FILE: wa/framework/uiauto/gradlew.bat ================================================ @if "%DEBUG%" == "" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @rem @rem ########################################################################## @rem Set local scope for the variables with windows NT shell if "%OS%"=="Windows_NT" setlocal @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. set DEFAULT_JVM_OPTS= set DIRNAME=%~dp0 if "%DIRNAME%" == "" set DIRNAME=. set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 if "%ERRORLEVEL%" == "0" goto init echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :findJavaFromJavaHome set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe if exist "%JAVA_EXE%" goto init echo. 
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :init @rem Get command-line arguments, handling Windowz variants if not "%OS%" == "Windows_NT" goto win9xME_args if "%@eval[2+2]" == "4" goto 4NT_args :win9xME_args @rem Slurp the command line arguments. set CMD_LINE_ARGS= set _SKIP=2 :win9xME_args_slurp if "x%~1" == "x" goto execute set CMD_LINE_ARGS=%* goto execute :4NT_args @rem Get arguments from the 4NT Shell from JP Software set CMD_LINE_ARGS=%$ :execute @rem Setup the command line set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar @rem Execute Gradle "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% :end @rem End local scope for the variables with windows NT shell if "%ERRORLEVEL%"=="0" goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 exit /b 1 :mainEnd if "%OS%"=="Windows_NT" endlocal :omega ================================================ FILE: wa/framework/uiauto/settings.gradle ================================================ include ':app' ================================================ FILE: wa/framework/version.py ================================================ # Copyright 2014-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import sys from collections import namedtuple from subprocess import Popen, PIPE VersionTuple = namedtuple('Version', ['major', 'minor', 'revision', 'dev']) version = VersionTuple(3, 4, 0, 'dev1') required_devlib_version = VersionTuple(1, 4, 0, 'dev3') def format_version(v): version_string = '{}.{}.{}'.format( v.major, v.minor, v.revision) if v.dev: version_string += '.{}'.format(v.dev) return version_string def get_wa_version(): return format_version(version) def get_wa_version_with_commit(): version_string = get_wa_version() commit = get_commit() if commit: return '{}+{}'.format(version_string, commit) else: return version_string def get_commit(): try: p = Popen(['git', 'rev-parse', 'HEAD'], cwd=os.path.dirname(__file__), stdout=PIPE, stderr=PIPE) except FileNotFoundError: return None std, _ = p.communicate() p.wait() if p.returncode: return None return std[:8].decode(sys.stdout.encoding or 'utf-8') ================================================ FILE: wa/framework/workload.py ================================================ # Copyright 2014-2019 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
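------------------------------------------------
EXAMPLE: version string formatting (illustrative sketch, not a repository file)
------------------------------------------------
format_version() in wa/framework/version.py above appends the dev suffix only
when it is non-empty. A quick illustration, assuming the module is importable
under that path:

from wa.framework.version import VersionTuple, format_version

print(format_version(VersionTuple(3, 4, 0, 'dev1')))  # -> 3.4.0.dev1
print(format_version(VersionTuple(3, 4, 0, '')))      # -> 3.4.0
------------------------------------------------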
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import logging
import os
import threading
import time

try:
    from shlex import quote
except ImportError:
    from pipes import quote

from wa.utils.android import get_cacheable_apk_info, build_apk_launch_command
from wa.framework.plugin import TargetedPlugin, Parameter
from wa.framework.resource import (ApkFile, ReventFile, File,
                                   loose_version_matching,
                                   range_version_matching)
from wa.framework.exception import WorkloadError, ConfigError
from wa.utils.types import ParameterDict, list_or_string, version_tuple
from wa.utils.revent import ReventRecorder
from wa.utils.exec_control import once_per_instance
from wa.utils.misc import atomic_write_path


class Workload(TargetedPlugin):
    """
    This is the base class for the workloads executed by the framework.
    Each of the methods throwing NotImplementedError *must* be implemented by
    the derived classes.
    """

    kind = 'workload'

    parameters = [
        Parameter('uninstall', kind=bool, default=True,
                  description="""
                  If ``True``, executables that are installed to the device
                  as part of the workload will be uninstalled again.
                  """),
    ]

    # Set this to True to mark that this workload poses a risk of exposing
    # information to the outside world about the device it runs on. An example of
    # this would be a benchmark application that sends scores and device data to a
    # database owned by the maintainer.
    # The user can then set allow_phone_home=False in their configuration to
    # prevent this workload from being run accidentally.
    phones_home = False

    # Set this to ``True`` to mark that the workload will fail without a network
    # connection; this enables it to fail early with a clear message.
    requires_network = False

    # Set this to specify a custom directory for assets to be pushed to; if unset
    # the working directory will be used.
    asset_directory = None

    # Used to store information about workload assets.
    deployable_assets = []

    def __init__(self, target, **kwargs):
        super(Workload, self).__init__(target, **kwargs)
        self.asset_files = []
        self.deployed_assets = []
        supported_platforms = getattr(self, 'supported_platforms', [])
        if supported_platforms and self.target.os not in supported_platforms:
            msg = 'Supported platforms for "{}" are "{}", attempting to run on "{}"'
            raise WorkloadError(msg.format(self.name,
                                           ' '.join(self.supported_platforms),
                                           self.target.os))

    def init_resources(self, context):
        """
        This method may be used to perform early resource discovery and
        initialization. This is invoked during the initial loading stage and
        before the device is ready, so cannot be used for any device-dependent
        initialization. This method is invoked before the workload instance is
        validated.
        """
        for asset in self.deployable_assets:
            self.asset_files.append(context.get(File(self, asset)))

    @once_per_instance
    def initialize(self, context):
        """
        This method should be used to perform once-per-run initialization of a
        workload instance, i.e., unlike ``setup()`` it will not be invoked on
        each iteration.
        """
        if self.asset_files:
            self.deploy_assets(context)

    def setup(self, context):
        """
        Perform the setup necessary to run the workload, such as copying the
        necessary files to the device, configuring the environments, etc.

        This is also the place to perform any on-device checks prior to
        attempting to execute the workload.
        """
        # pylint: disable=unused-argument
        if self.requires_network and not self.target.is_network_connected():
            raise WorkloadError(
                'Workload "{}" requires internet. Target does not appear '
                'to be connected to the internet.'.format(self.name))

    def run(self, context):
        """
        Execute the workload. This is the method that performs the actual
        "work" of the workload.
        """

    def extract_results(self, context):
        """
        Extract results on the target
        """

    def update_output(self, context):
        """
        Update the output within the specified execution context with the
        metrics and artifacts for this workload iteration.
        """

    def teardown(self, context):
        """
        Perform any final clean up for the Workload.
        """

    @once_per_instance
    def finalize(self, context):
        if self.cleanup_assets:
            self.remove_assets(context)

    def deploy_assets(self, context):
        """
        Deploy assets to the target, if available
        """
        # pylint: disable=unused-argument
        if not self.asset_directory:
            self.asset_directory = self.target.working_directory
        else:
            self.target.execute('mkdir -p {}'.format(self.asset_directory))

        for asset in self.asset_files:
            self.target.push(asset, self.asset_directory)
            self.deployed_assets.append(self.target.path.join(self.asset_directory,
                                                              os.path.basename(asset)))

    def remove_assets(self, context):
        """
        Clean up assets deployed to the target
        """
        # pylint: disable=unused-argument
        for asset in self.deployed_assets:
            self.target.remove(asset)

    def __str__(self):
        return '<Workload {}>'.format(self.name)


class ApkWorkload(Workload):

    supported_platforms = ['android']

    # May be optionally overwritten by subclasses
    # Times are in seconds
    loading_time = 10
    package_names = []
    supported_versions = []
    activity = None
    view = None
    clear_data_on_reset = True
    apk_arguments = {}

    # Set this to True to mark that this workload requires the target apk to be run
    # for initialisation purposes before the main run is performed.
    requires_rerun = False

    parameters = [
        Parameter('package_name', kind=str,
                  description="""
                  The package name that can be used to specify
                  the workload apk to use.
                  """),
        Parameter('install_timeout', kind=int,
                  constraint=lambda x: x > 0,
                  default=300,
                  description="""
                  Timeout for the installation of the apk.
                  """),
        Parameter('version', kind=str,
                  default=None,
                  description="""
                  The version of the package to be used.
                  """),
        Parameter('max_version', kind=str,
                  default=None,
                  description="""
                  The maximum version of the package to be used.
                  """),
        Parameter('min_version', kind=str,
                  default=None,
                  description="""
                  The minimum version of the package to be used.
                  """),
        Parameter('variant', kind=str,
                  default=None,
                  description="""
                  The variant of the package to be used.
                  """),
        Parameter('strict', kind=bool,
                  default=False,
                  description="""
                  Whether to throw an error if the specified package cannot be
                  found on the host.
                  """),
        Parameter('force_install', kind=bool,
                  default=False,
                  description="""
                  Always re-install the APK, even if a matching version is found
                  already installed on the device.
                  """),
        Parameter('uninstall', kind=bool,
                  default=False,
                  override=True,
                  description="""
                  If ``True``, will uninstall the workload's APK as part of teardown.
                  """),
        Parameter('exact_abi', kind=bool,
                  default=False,
                  description="""
                  If ``True``, the workload will check that the APK matches the
                  target device ABI, otherwise any suitable APK found will be used.
                  """),
        Parameter('prefer_host_package', kind=bool,
                  default=True,
                  aliases=['check_apk'],
                  description="""
                  If ``True`` then a package found on the host will be preferred
                  if it is a valid version and ABI; if not, it will fall back to
                  the version on the target if available. If ``False`` then the
                  version on the target is preferred instead.
                  """),
        Parameter('view', kind=str,
                  default=None,
                  merge=True,
                  description="""
                  Manually override the 'View' of the workload for use with
                  instruments such as the ``fps`` instrument. If not specified,
                  a workload-dependent 'View' will be automatically generated.
                  """),
    ]

    @property
    def package(self):
        return self.apk.package

    def __init__(self, target, **kwargs):
        if target.os == 'chromeos':
            if target.supports_android:
                target = target.android_container
            else:
                raise ConfigError('Target does not appear to support Android')
        super(ApkWorkload, self).__init__(target, **kwargs)
        if self.activity is not None and '.' not in self.activity:
            # If we're receiving just the activity name, it's taken relative to
            # the package namespace:
            self.activity = '.' + self.activity
        self.apk = PackageHandler(self,
                                  package_name=self.package_name,
                                  variant=self.variant,
                                  strict=self.strict,
                                  version=self.version or self.supported_versions,
                                  force_install=self.force_install,
                                  install_timeout=self.install_timeout,
                                  uninstall=self.uninstall,
                                  exact_abi=self.exact_abi,
                                  prefer_host_package=self.prefer_host_package,
                                  clear_data_on_reset=self.clear_data_on_reset,
                                  activity=self.activity,
                                  min_version=self.min_version,
                                  max_version=self.max_version,
                                  apk_arguments=self.apk_arguments)

    def validate(self):
        if self.min_version and self.max_version:
            if version_tuple(self.min_version) > version_tuple(self.max_version):
                msg = 'Cannot specify min version ({}) greater than max version ({})'
                raise ConfigError(msg.format(self.min_version, self.max_version))

    @once_per_instance
    def initialize(self, context):
        super(ApkWorkload, self).initialize(context)
        self.apk.initialize(context)
        # pylint: disable=access-member-before-definition, attribute-defined-outside-init
        if self.version is None:
            self.version = self.apk.apk_info.version_name
        if self.view is None:
            self.view = 'SurfaceView - {}/{}'.format(self.apk.package,
                                                     self.apk.activity)

    def setup(self, context):
        super(ApkWorkload, self).setup(context)
        self.apk.setup(context)
        if self.requires_rerun:
            self.setup_rerun()
            self.apk.restart_activity()
        time.sleep(self.loading_time)

    def setup_rerun(self):
        """
        Perform the setup necessary to rerun the workload. Only called if
        ``requires_rerun`` is set.
""" def teardown(self, context): super(ApkWorkload, self).teardown(context) self.apk.teardown() def deploy_assets(self, context): super(ApkWorkload, self).deploy_assets(context) self.target.refresh_files(self.deployed_assets) class ApkUIWorkload(ApkWorkload): def __init__(self, target, **kwargs): super(ApkUIWorkload, self).__init__(target, **kwargs) self.gui = None def init_resources(self, context): super(ApkUIWorkload, self).init_resources(context) self.gui.init_resources(context) @once_per_instance def initialize(self, context): super(ApkUIWorkload, self).initialize(context) self.gui.deploy() def setup(self, context): super(ApkUIWorkload, self).setup(context) self.gui.setup() def run(self, context): super(ApkUIWorkload, self).run(context) self.gui.run() def extract_results(self, context): super(ApkUIWorkload, self).extract_results(context) self.gui.extract_results() def teardown(self, context): self.gui.teardown() super(ApkUIWorkload, self).teardown(context) @once_per_instance def finalize(self, context): super(ApkUIWorkload, self).finalize(context) if self.cleanup_assets: self.gui.remove() class ApkUiautoWorkload(ApkUIWorkload): parameters = [ Parameter('markers_enabled', kind=bool, default=False, description=""" If set to ``True``, workloads will insert markers into logs at various points during execution. These markers may be used by other plugins or post-processing scripts to provide measurements or statistics for specific parts of the workload execution. """), ] def __init__(self, target, **kwargs): super(ApkUiautoWorkload, self).__init__(target, **kwargs) self.gui = UiAutomatorGUI(self) def setup(self, context): self.gui.uiauto_params['package_name'] = self.apk.apk_info.package self.gui.uiauto_params['markers_enabled'] = self.markers_enabled self.gui.init_commands() super(ApkUiautoWorkload, self).setup(context) class ApkReventWorkload(ApkUIWorkload): # May be optionally overwritten by subclasses # Times are in seconds setup_timeout = 5 * 60 run_timeout = 10 * 60 extract_results_timeout = 5 * 60 teardown_timeout = 5 * 60 def __init__(self, target, **kwargs): super(ApkReventWorkload, self).__init__(target, **kwargs) self.gui = ReventGUI(self, target, self.setup_timeout, self.run_timeout, self.extract_results_timeout, self.teardown_timeout) class UIWorkload(Workload): def __init__(self, target, **kwargs): super(UIWorkload, self).__init__(target, **kwargs) self.gui = None def init_resources(self, context): super(UIWorkload, self).init_resources(context) self.gui.init_resources(context) @once_per_instance def initialize(self, context): super(UIWorkload, self).initialize(context) self.gui.deploy() def setup(self, context): super(UIWorkload, self).setup(context) self.gui.setup() def run(self, context): super(UIWorkload, self).run(context) self.gui.run() def extract_results(self, context): super(UIWorkload, self).extract_results(context) self.gui.extract_results() def teardown(self, context): self.gui.teardown() super(UIWorkload, self).teardown(context) @once_per_instance def finalize(self, context): super(UIWorkload, self).finalize(context) if self.cleanup_assets: self.gui.remove() class UiautoWorkload(UIWorkload): supported_platforms = ['android'] parameters = [ Parameter('markers_enabled', kind=bool, default=False, description=""" If set to ``True``, workloads will insert markers into logs at various points during execution. These markers may be used by other plugins or post-processing scripts to provide measurements or statistics for specific parts of the workload execution. 
"""), ] def __init__(self, target, **kwargs): if target.os == 'chromeos': if target.supports_android: target = target.android_container else: raise ConfigError('Target does not appear to support Android') super(UiautoWorkload, self).__init__(target, **kwargs) self.gui = UiAutomatorGUI(self) def setup(self, context): self.gui.uiauto_params['markers_enabled'] = self.markers_enabled self.gui.init_commands() super(UiautoWorkload, self).setup(context) class ReventWorkload(UIWorkload): # May be optionally overwritten by subclasses # Times are in seconds setup_timeout = 5 * 60 run_timeout = 10 * 60 extract_results_timeout = 5 * 60 teardown_timeout = 5 * 60 def __init__(self, target, **kwargs): super(ReventWorkload, self).__init__(target, **kwargs) self.gui = ReventGUI(self, target, self.setup_timeout, self.run_timeout, self.extract_results_timeout, self.teardown_timeout) class UiAutomatorGUI(object): stages = ['setup', 'runWorkload', 'extractResults', 'teardown'] uiauto_runner = 'android.support.test.runner.AndroidJUnitRunner' def __init__(self, owner, package=None, klass='UiAutomation', timeout=600): self.owner = owner self.target = self.owner.target self.uiauto_package = package self.uiauto_class = klass self.timeout = timeout self.logger = logging.getLogger('gui') self.uiauto_file = None self.commands = {} self.uiauto_params = ParameterDict() def init_resources(self, resolver): self.uiauto_file = resolver.get(ApkFile(self.owner, uiauto=True)) if not self.uiauto_package: uiauto_info = get_cacheable_apk_info(self.uiauto_file) self.uiauto_package = uiauto_info.package def init_commands(self): params_dict = self.uiauto_params params_dict['workdir'] = self.target.working_directory params = '' for k, v in params_dict.iter_encoded_items(): params += ' -e {} {}'.format(k, v) for stage in self.stages: class_string = '{}.{}#{}'.format(self.uiauto_package, self.uiauto_class, stage) instrumentation_string = '{}/{}'.format(self.uiauto_package, self.uiauto_runner) cmd_template = 'am instrument -w -r{} -e class {} {}' self.commands[stage] = cmd_template.format(params, class_string, instrumentation_string) def deploy(self): if self.target.package_is_installed(self.uiauto_package): self.target.uninstall_package(self.uiauto_package) self.target.install_apk(self.uiauto_file) def set(self, name, value): self.uiauto_params[name] = value def setup(self, timeout=None): if not self.commands: raise RuntimeError('Commands have not been initialized') self.target.killall('uiautomator') self._execute('setup', timeout or self.timeout) def run(self, timeout=None): if not self.commands: raise RuntimeError('Commands have not been initialized') self._execute('runWorkload', timeout or self.timeout) def extract_results(self, timeout=None): if not self.commands: raise RuntimeError('Commands have not been initialized') self._execute('extractResults', timeout or self.timeout) def teardown(self, timeout=None): if not self.commands: raise RuntimeError('Commands have not been initialized') self._execute('teardown', timeout or self.timeout) def remove(self): self.target.uninstall(self.uiauto_package) def _execute(self, stage, timeout): result = self.target.execute(self.commands[stage], timeout) if 'FAILURE' in result: raise WorkloadError(result) else: self.logger.debug(result) time.sleep(2) class ReventGUI(object): def __init__(self, workload, target, setup_timeout, run_timeout, extract_results_timeout, teardown_timeout): self.logger = logging.getLogger(self.__class__.__name__) self.workload = workload self.target = target 
self.setup_timeout = setup_timeout self.run_timeout = run_timeout self.extract_results_timeout = extract_results_timeout self.teardown_timeout = teardown_timeout self.revent_recorder = ReventRecorder(self.target) self.on_target_revent_binary = self.target.get_workpath('revent') self.on_target_setup_revent = self.target.get_workpath('{}.setup.revent'.format(self.target.model)) self.on_target_run_revent = self.target.get_workpath('{}.run.revent'.format(self.target.model)) self.on_target_extract_results_revent = self.target.get_workpath('{}.extract_results.revent'.format(self.target.model)) self.on_target_teardown_revent = self.target.get_workpath('{}.teardown.revent'.format(self.target.model)) self.revent_setup_file = None self.revent_run_file = None self.revent_extract_results_file = None self.revent_teardown_file = None def init_resources(self, resolver): self.revent_setup_file = resolver.get(ReventFile(owner=self.workload, stage='setup', target=self.target.model), strict=False) self.revent_run_file = resolver.get(ReventFile(owner=self.workload, stage='run', target=self.target.model)) self.revent_extract_results_file = resolver.get(ReventFile(owner=self.workload, stage='extract_results', target=self.target.model), strict=False) self.revent_teardown_file = resolver.get(resource=ReventFile(owner=self.workload, stage='teardown', target=self.target.model), strict=False) def deploy(self): self.revent_recorder.deploy() def setup(self): self._check_revent_files() if self.revent_setup_file: self.revent_recorder.replay(self.on_target_setup_revent, timeout=self.setup_timeout) def run(self): self.logger.debug('Replaying "%s" with %d seconds timeout', os.path.basename(self.on_target_run_revent), self.run_timeout) self.revent_recorder.replay(self.on_target_run_revent, timeout=self.run_timeout) self.logger.debug('Replay completed.') def extract_results(self): if self.revent_extract_results_file: self.revent_recorder.replay(self.on_target_extract_results_revent, timeout=self.extract_results_timeout) def teardown(self): if self.revent_teardown_file: self.revent_recorder.replay(self.on_target_teardown_revent, timeout=self.teardown_timeout) def remove(self): self.target.remove(self.on_target_setup_revent) self.target.remove(self.on_target_run_revent) self.target.remove(self.on_target_extract_results_revent) self.target.remove(self.on_target_teardown_revent) self.revent_recorder.remove() def _check_revent_files(self): if not self.revent_run_file: # pylint: disable=too-few-format-args message = '{0}.run.revent file does not exist, ' \ 'Please provide one for your target, {0}' raise WorkloadError(message.format(self.target.model)) self.target.push(self.revent_run_file, self.on_target_run_revent) if self.revent_setup_file: self.target.push(self.revent_setup_file, self.on_target_setup_revent) if self.revent_extract_results_file: self.target.push(self.revent_extract_results_file, self.on_target_extract_results_revent) if self.revent_teardown_file: self.target.push(self.revent_teardown_file, self.on_target_teardown_revent) class PackageHandler(object): @property def package(self): if self.apk_info is None: return None return self.apk_info.package @property def activity(self): if self._activity: return self._activity if self.apk_info is None: return None return self.apk_info.activity # pylint: disable=too-many-locals def __init__(self, owner, install_timeout=300, version=None, variant=None, package_name=None, strict=False, force_install=False, uninstall=False, exact_abi=False, prefer_host_package=True, 
clear_data_on_reset=True, activity=None, min_version=None, max_version=None, apk_arguments=None): self.logger = logging.getLogger('apk') self.owner = owner self.target = self.owner.target self.install_timeout = install_timeout self.version = version self.min_version = min_version self.max_version = max_version self.variant = variant self.package_name = package_name self.strict = strict self.force_install = force_install self.uninstall = uninstall self.exact_abi = exact_abi self.prefer_host_package = prefer_host_package self.clear_data_on_reset = clear_data_on_reset self._activity = activity self.supported_abi = self.target.supported_abi self.apk_file = None self.apk_info = None self.apk_version = None self.logcat_log = None self.error_msg = None self.apk_arguments = apk_arguments def initialize(self, context): self.resolve_package(context) def setup(self, context): context.update_metadata('app_version', self.apk_info.version_name) context.update_metadata('app_name', self.apk_info.package) self.initialize_package(context) self.start_activity() self.target.execute('am kill-all') # kill all *background* activities self.target.clear_logcat() def resolve_package(self, context): if not self.owner.package_names and not self.package_name: msg = 'Cannot Resolve package; No package name(s) specified' raise WorkloadError(msg) self.error_msg = None if self.prefer_host_package: self.resolve_package_from_host(context) if not self.apk_file: self.resolve_package_from_target() else: self.resolve_package_from_target() if not self.apk_file: self.resolve_package_from_host(context) if self.apk_file: self.apk_info = get_cacheable_apk_info(self.apk_file) else: if self.error_msg: raise WorkloadError(self.error_msg) else: if self.package_name: message = 'Package "{package}" not found for workload {name} '\ 'on host or target.' elif self.version: message = 'No matching package found for workload {name} '\ '(version {version}) on host or target.' 
else: message = 'No matching package found for workload {name} on host or target' raise WorkloadError(message.format(name=self.owner.name, version=self.version, package=self.package_name)) def resolve_package_from_host(self, context): self.logger.debug('Resolving package on host system') if self.package_name: self.apk_file = context.get_resource(ApkFile(self.owner, variant=self.variant, version=self.version, package=self.package_name, exact_abi=self.exact_abi, supported_abi=self.supported_abi, min_version=self.min_version, max_version=self.max_version), strict=self.strict) else: available_packages = [] for package in self.owner.package_names: apk_file = context.get_resource(ApkFile(self.owner, variant=self.variant, version=self.version, package=package, exact_abi=self.exact_abi, supported_abi=self.supported_abi, min_version=self.min_version, max_version=self.max_version), strict=self.strict) if apk_file: available_packages.append(apk_file) if len(available_packages) == 1: self.apk_file = available_packages[0] elif len(available_packages) > 1: self.error_msg = self._get_package_error_msg('host') def resolve_package_from_target(self): # pylint: disable=too-many-branches self.logger.debug('Resolving package on target') found_package = None if self.package_name: if not self.target.package_is_installed(self.package_name): return else: installed_versions = [self.package_name] else: installed_versions = [] for package in self.owner.package_names: if self.target.package_is_installed(package): installed_versions.append(package) if self.version or self.min_version or self.max_version: matching_packages = [] for package in installed_versions: package_version = self.target.get_package_version(package) if self.version: for v in list_or_string(self.version): if loose_version_matching(v, package_version): matching_packages.append(package) else: if range_version_matching(package_version, self.min_version, self.max_version): matching_packages.append(package) if len(matching_packages) == 1: found_package = matching_packages[0] elif len(matching_packages) > 1: self.error_msg = self._get_package_error_msg('device') else: if len(installed_versions) == 1: found_package = installed_versions[0] elif len(installed_versions) > 1: self.error_msg = 'Package version not set and multiple versions found on device.' if found_package: self.logger.debug('Found matching package on target; Pulling to host.') self.apk_file = self.pull_apk(found_package) self.package_name = found_package def initialize_package(self, context): installed_version = self.target.get_package_version(self.apk_info.package) host_version = self.apk_info.version_name if installed_version != host_version: if installed_version: message = '{} host version: {}, device version: {}; re-installing...' self.logger.debug(message.format(self.owner.name, host_version, installed_version)) else: message = '{} host version: {}, not found on device; installing...' self.logger.debug(message.format(self.owner.name, host_version)) self.force_install = True # pylint: disable=attribute-defined-outside-init else: message = '{} version {} present on both device and host.' 
self.logger.debug(message.format(self.owner.name, host_version)) if self.force_install: if installed_version: self.target.uninstall_package(self.apk_info.package) self.install_apk(context) else: self.reset(context) if self.apk_info.permissions: self.logger.debug('Granting runtime permissions') for permission in self.apk_info.permissions: self.target.grant_package_permission(self.apk_info.package, permission) self.apk_version = host_version def start_activity(self): cmd = build_apk_launch_command(self.apk_info.package, self.activity, self.apk_arguments) output = self.target.execute(cmd) if 'Error:' in output: # this will dismiss any error dialogs self.target.execute('am force-stop {}'.format(self.apk_info.package)) raise WorkloadError(output) self.logger.debug(output) def restart_activity(self): self.target.execute('am force-stop {}'.format(self.apk_info.package)) self.start_activity() def reset(self, context): # pylint: disable=W0613 self.target.execute('am force-stop {}'.format(self.apk_info.package)) if self.clear_data_on_reset: self.target.execute('pm clear {}'.format(self.apk_info.package)) def install_apk(self, context): # pylint: disable=unused-argument output = self.target.install_apk(self.apk_file, self.install_timeout, replace=True, allow_downgrade=True) if 'Failure' in output: if 'ALREADY_EXISTS' in output: msg = 'Using already installed APK (did not uninstall properly?)' self.logger.warning(msg) else: raise WorkloadError(output) else: self.logger.debug(output) def pull_apk(self, package): if not self.target.package_is_installed(package): message = 'Cannot retrieve "{}" as not installed on Target' raise WorkloadError(message.format(package)) package_info = self.target.get_package_info(package) apk_name = self._get_package_name(package_info.apk_path) host_path = os.path.join(self.owner.dependencies_directory, apk_name) with atomic_write_path(host_path) as at_path: self.target.pull(package_info.apk_path, at_path, timeout=self.install_timeout) return host_path def teardown(self): self.target.execute('am force-stop {}'.format(self.apk_info.package)) if self.uninstall: self.target.uninstall_package(self.apk_info.package) def _get_package_name(self, apk_path): return self.target.path.basename(apk_path) def _get_package_error_msg(self, location): if self.version: msg = 'Multiple matches for "{version}" found on {location}.' elif self.min_version and self.max_version: msg = 'Multiple matches between versions "{min_version}" and "{max_version}" found on {location}.' elif self.max_version: msg = 'Multiple matches less than or equal to "{max_version}" found on {location}.' elif self.min_version: msg = 'Multiple matches greater or equal to "{min_version}" found on {location}.' else: msg = '' return msg.format(version=self.version, min_version=self.min_version, max_version=self.max_version, location=location) class TestPackageHandler(PackageHandler): """Class wrapping an APK used through ``am instrument``. 
""" def __init__(self, owner, instrument_args=None, raw_output=False, instrument_wait=True, no_hidden_api_checks=False, *args, **kwargs): if instrument_args is None: instrument_args = {} super(TestPackageHandler, self).__init__(owner, *args, **kwargs) self.raw = raw_output self.args = instrument_args self.wait = instrument_wait self.no_checks = no_hidden_api_checks self.cmd = '' self.instrument_thread = None self._instrument_output = None def setup(self, context): self.initialize_package(context) words = ['am', 'instrument', '--user', '0'] if self.raw: words.append('-r') if self.wait: words.append('-w') if self.no_checks: words.append('--no-hidden-api-checks') for k, v in self.args.items(): words.extend(['-e', str(k), str(v)]) words.append(str(self.apk_info.package)) if self.apk_info.activity: words[-1] += '/{}'.format(self.apk_info.activity) self.cmd = ' '.join(quote(x) for x in words) self.instrument_thread = threading.Thread(target=self._start_instrument) def start_activity(self): self.instrument_thread.start() def wait_instrument_over(self): self.instrument_thread.join() if 'Error:' in self._instrument_output: cmd = 'am force-stop {}'.format(self.apk_info.package) self.target.execute(cmd) raise WorkloadError(self._instrument_output) def _start_instrument(self): self._instrument_output = self.target.execute(self.cmd) self.logger.debug(self._instrument_output) def _get_package_name(self, apk_path): return 'test_{}'.format(self.target.path.basename(apk_path)) @property def instrument_output(self): if self.instrument_thread.is_alive(): self.instrument_thread.join() # writes self._instrument_output return self._instrument_output ================================================ FILE: wa/instruments/__init__.py ================================================ ================================================ FILE: wa/instruments/delay.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=W0613,E1101,E0203,W0201 import time from wa import Instrument, Parameter from wa.framework.exception import ConfigError, InstrumentError from wa.framework.instrument import extremely_slow from wa.utils.types import identifier class DelayInstrument(Instrument): name = 'delay' description = """ This instrument introduces a delay before beginning a new spec, a new job or before the main execution of a workload. The delay may be specified as either a fixed period or a temperature threshold that must be reached. Optionally, if an active cooling solution is available on the device to speed up temperature drop between runs, it may be controlled using this instrument. """ parameters = [ Parameter('temperature_file', default='/sys/devices/virtual/thermal/thermal_zone0/temp', global_alias='thermal_temp_file', description=""" Full path to the sysfile on the target that contains the target's temperature. 
"""), Parameter('temperature_timeout', kind=int, default=600, global_alias='thermal_timeout', description=""" The timeout after which the instrument will stop waiting even if the specified threshold temperature is not reached. If this timeout is hit, then a warning will be logged stating the actual temperature at which the timeout has ended. """), Parameter('temperature_poll_period', kind=int, default=5, global_alias='thermal_sleep_time', description=""" How long to sleep (in seconds) between polling current target temperature. """), Parameter('temperature_between_specs', kind=int, default=None, global_alias='thermal_threshold_between_specs', description=""" Temperature (in target-specific units) the target must cool down to before the iteration spec will be run. If this is set to ``0`` then the devices initial temperature will used as the threshold. .. note:: This cannot be specified at the same time as ``fixed_between_specs`` """), Parameter('fixed_between_specs', kind=int, default=None, global_alias='fixed_delay_between_specs', description=""" How long to sleep (in seconds) before starting a new workload spec. .. note:: This cannot be specified at the same time as ``temperature_between_specs`` """), Parameter('temperature_between_jobs', kind=int, default=None, global_alias='thermal_threshold_between_jobs', aliases=['temperature_between_iterations'], description=""" Temperature (in target-specific units) the target must cool down to before the next job will be run. If this is set to ``0`` then the devices initial temperature will used as the threshold. .. note:: This cannot be specified at the same time as ``fixed_between_jobs`` """), Parameter('fixed_between_jobs', kind=int, default=None, global_alias='fixed_delay_between_jobs', aliases=['fixed_between_iterations'], description=""" How long to sleep (in seconds) before starting each new job. .. note:: This cannot be specified at the same time as ``temperature_between_jobs`` """), Parameter('fixed_before_start', kind=int, default=None, global_alias='fixed_delay_before_start', description=""" How long to sleep (in seconds) after setup for an iteration has been performed but before running the workload. .. note:: This cannot be specified at the same time as ``temperature_before_start`` """), Parameter('temperature_before_start', kind=int, default=None, global_alias='thermal_threshold_before_start', description=""" Temperature (in device-specific units) the device must cool down to just before the actual workload execution (after setup has been performed). .. note:: This cannot be specified at the same time as ``fixed_between_jobs`` """), Parameter('active_cooling', kind=bool, default=False, description=""" This instrument supports an active cooling solution while waiting for the device temperature to drop to the threshold. If you wish to use this feature please ensure the relevant module is installed on the device. """), ] active_cooling_modules = ['mbed-fan', 'odroidxu3-fan'] def initialize(self, context): if self.active_cooling: self.cooling = self._discover_cooling_module() if not self.cooling: msg = 'Cooling module not found on target. 
Please install one of the following modules: {}' raise InstrumentError(msg.format(self.active_cooling_modules)) if self.temperature_between_jobs == 0: temp = self.target.read_int(self.temperature_file) self.logger.debug('Setting temperature threshold between jobs to {}'.format(temp)) self.temperature_between_jobs = temp if self.temperature_between_specs == 0: temp = self.target.read_int(self.temperature_file) msg = 'Setting temperature threshold between workload specs to {}' self.logger.debug(msg.format(temp)) self.temperature_between_specs = temp @extremely_slow def start(self, context): if self.fixed_before_start: msg = 'Waiting for {}s before running workload...' self.logger.info(msg.format(self.fixed_before_start)) time.sleep(self.fixed_before_start) elif self.temperature_before_start: self.logger.info('Waiting for temperature drop before running workload...') self.wait_for_temperature(self.temperature_before_start) @extremely_slow def before_job(self, context): if self.fixed_between_specs and context.spec_changed: msg = 'Waiting for {}s before starting new spec...' self.logger.info(msg.format(self.fixed_between_specs)) time.sleep(self.fixed_between_specs) elif self.temperature_between_jobs and context.spec_changed: self.logger.info('Waiting for temperature drop before starting new spec...') self.wait_for_temperature(self.temperature_between_jobs) elif self.fixed_between_jobs: msg = 'Waiting for {}s before starting new job...' self.logger.info(msg.format(self.fixed_between_jobs)) time.sleep(self.fixed_between_jobs) elif self.temperature_between_jobs: self.logger.info('Waiting for temperature drop before starting new job...') self.wait_for_temperature(self.temperature_between_jobs) def wait_for_temperature(self, temperature): if self.active_cooling: self.cooling.start() self.do_wait_for_temperature(temperature) self.cooling.stop() else: self.do_wait_for_temperature(temperature) def do_wait_for_temperature(self, temperature): reading = self.target.read_int(self.temperature_file) waiting_start_time = time.time() while reading > temperature: self.logger.debug('target temperature: {}'.format(reading)) if time.time() - waiting_start_time > self.temperature_timeout: self.logger.warning('Reached timeout; current temperature: {}'.format(reading)) break time.sleep(self.temperature_poll_period) reading = self.target.read_int(self.temperature_file) def validate(self): if (self.temperature_between_specs is not None and self.fixed_between_specs is not None): raise ConfigError('Both fixed delay and thermal threshold specified for specs.') if (self.temperature_between_jobs is not None and self.fixed_between_jobs is not None): raise ConfigError('Both fixed delay and thermal threshold specified for jobs.') if (self.temperature_before_start is not None and self.fixed_before_start is not None): raise ConfigError('Both fixed delay and thermal threshold specified before start.') if not any([self.temperature_between_specs, self.fixed_between_specs, self.temperature_between_jobs, self.fixed_between_jobs, self.temperature_before_start, self.fixed_before_start]): raise ConfigError('Delay instrument is enabled, but no delay is specified.') def _discover_cooling_module(self): cooling_module = None for module in self.active_cooling_modules: if self.target.has(module): if not cooling_module: cooling_module = getattr(self.target, identifier(module)) else: msg = 'Multiple cooling modules found "{}" "{}".' 
raise InstrumentError(msg.format(cooling_module.name, module)) return cooling_module ================================================ FILE: wa/instruments/dmesg.py ================================================ # Copyright 2014-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os from wa import Instrument, Parameter from wa.framework.exception import InstrumentError from wa.framework.instrument import slow from wa.utils.misc import ensure_file_directory_exists as _f class DmesgInstrument(Instrument): # pylint: disable=no-member,attribute-defined-outside-init """ Collects dmesg output before and during the run. """ name = 'dmesg' parameters = [ Parameter('loglevel', kind=int, allowed_values=list(range(8)), description='Set loglevel for console output.') ] loglevel_file = '/proc/sys/kernel/printk' def initialize(self, context): # pylint: disable=unused-argument self.need_root = self.target.os == 'android' if self.need_root and not self.target.is_rooted: raise InstrumentError('Need root to collect dmesg on Android') def setup(self, context): if self.loglevel: self.old_loglevel = self.target.read_int(self.loglevel_file) self.target.write_value(self.loglevel_file, self.loglevel, verify=False) self.before_file = _f(os.path.join(context.output_directory, 'dmesg', 'before')) self.after_file = _f(os.path.join(context.output_directory, 'dmesg', 'after')) @slow def start(self, context): with open(self.before_file, 'w') as wfh: wfh.write(self.target.execute('dmesg', as_root=self.need_root)) context.add_artifact('dmesg_before', self.before_file, kind='data') if self.target.is_rooted: self.target.execute('dmesg -c', as_root=True) @slow def stop(self, context): with open(self.after_file, 'w') as wfh: wfh.write(self.target.execute('dmesg', as_root=self.need_root)) context.add_artifact('dmesg_after', self.after_file, kind='data') def teardown(self, context): # pylint: disable=unused-argument if self.loglevel: self.target.write_value(self.loglevel_file, self.old_loglevel, verify=False) ================================================ FILE: wa/instruments/energy_measurement.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
# # pylint: disable=W0613,E1101 from collections import defaultdict import os import shutil from devlib import DerivedEnergyMeasurements from devlib.instrument import CONTINUOUS from devlib.instrument.energy_probe import EnergyProbeInstrument from devlib.instrument.arm_energy_probe import ArmEnergyProbeInstrument from devlib.instrument.daq import DaqInstrument from devlib.instrument.acmecape import AcmeCapeInstrument from devlib.instrument.monsoon import MonsoonInstrument from devlib.platform.arm import JunoEnergyInstrument from devlib.utils.misc import which from wa import Instrument, Parameter from wa.framework import pluginloader from wa.framework.plugin import Plugin from wa.framework.exception import ConfigError, InstrumentError from wa.utils.types import (list_of_strings, list_of_ints, list_or_string, obj_dict, identifier, list_of_numbers) class EnergyInstrumentBackend(Plugin): name = None kind = 'energy_instrument_backend' parameters = [] instrument = None def get_parameters(self): return {p.name: p for p in self.parameters} def validate_parameters(self, params): pass def get_instruments(self, target, metadir, **kwargs): """ Get a dict mapping device keys to Instrument instances. Typically there is just a single device/instrument, in which case the device key is arbitrary. """ return {None: self.instrument(target, **kwargs)} # pylint: disable=not-callable class DAQBackend(EnergyInstrumentBackend): name = 'daq' description = """ National Instruments Data Acquisition device For more information about the device, please see the NI website: http://www.ni.com/data-acquisition/ This backend has been used with USB-62xx and USB-63xx devices, though other models (e.g. the PCIe variants) will most likely also work. This backend relies on the daq-server running on a machine connected to a DAQ device: https://github.com/ARM-software/daq-server The server is necessary because DAQ devices have drivers only for Windows and very specific (old) Linux kernels, so the machine interfacing with the DAQ is most likely going to be different from the machine running WA. """ parameters = [ Parameter('resistor_values', kind=list_of_numbers, global_alias='daq_resistor_values', description=""" The values of the resistors (in Ohms) across which the voltages are measured. """), Parameter('labels', kind=list_of_strings, global_alias='daq_labels', description=""" List of port labels. If specified, the length of the list must match the length of ``resistor_values``. """), Parameter('host', kind=str, default='localhost', global_alias='daq_server_host', description=""" The host address of the machine that runs the daq server with which the instrument communicates. """), Parameter('port', kind=int, default=45677, global_alias='daq_server_port', description=""" The port number of the daq server with which the instrument communicates. """), Parameter('device_id', kind=str, default='Dev1', global_alias='daq_device_id', description=""" The ID under which the DAQ is registered with the driver. """), Parameter('v_range', kind=str, default=2.5, global_alias='daq_v_range', description=""" Specifies the voltage range for the SOC voltage channel on the DAQ (please refer to ``daq-server`` package documentation for details). """), Parameter('dv_range', kind=str, default=0.2, global_alias='daq_dv_range', description=""" Specifies the voltage range for the resistor voltage channel on the DAQ (please refer to ``daq-server`` package documentation for details).
"""), Parameter('sample_rate_hz', kind=int, default=10000, global_alias='daq_sampling_rate', description=""" Specify the sample rate in Hz. """), Parameter('channel_map', kind=list_of_ints, default=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23), global_alias='daq_channel_map', description=""" Represents mapping from logical AI channel number to physical connector on the DAQ (varies between DAQ models). The default assumes DAQ 6363 and similar with AI channels on connectors 0-7 and 16-23. """), Parameter('keep_raw', kind=bool, default=False, description=""" If set to ``True``, this will prevent the raw files obtained from the device before processing from being deleted (this is mainly used for debugging). """), ] instrument = DaqInstrument def validate_parameters(self, params): if not params.get('resistor_values'): raise ConfigError('Mandatory parameter "resistor_values" is not set.') if params.get('labels'): if len(params.get('labels')) != len(params.get('resistor_values')): msg = 'Number of DAQ port labels does not match the number of resistor values.' raise ConfigError(msg) class EnergyProbeBackend(EnergyInstrumentBackend): name = 'energy_probe' description = """ Arm Energy Probe caiman version This backend relies on the caiman utility: https://github.com/ARM-software/caiman For more information about Arm Energy Probe please see https://developer.arm.com/products/software-development-tools/ds-5-development-studio/streamline/arm-energy-probe """ parameters = [ Parameter('resistor_values', kind=list_of_ints, description=""" The values of the resistors (in Ohms) across which the voltages are measured. """), Parameter('labels', kind=list_of_strings, description=""" List of port labels. If specified, the length of the list must match the length of ``resistor_values``. """), Parameter('device_entry', kind=str, default='/dev/ttyACM0', description=""" Path to /dev entry for the energy probe (it should be /dev/ttyACMx) """), Parameter('keep_raw', kind=bool, default=False, description=""" If set to ``True``, this will prevent the raw files obtained from the device before processing from being deleted (this is mainly used for debugging). """), ] instrument = EnergyProbeInstrument def validate_parameters(self, params): if not params.get('resistor_values'): raise ConfigError('Mandatory parameter "resistor_values" is not set.') if params.get('labels'): if len(params.get('labels')) != len(params.get('resistor_values')): msg = 'Number of Energy Probe port labels does not match the number of resistor values.' raise ConfigError(msg) class ArmEnergyProbeBackend(EnergyInstrumentBackend): name = 'arm_energy_probe' description = """ Arm Energy Probe arm-probe version An alternative Arm Energy Probe backend that relies on the arm-probe utility: https://git.linaro.org/tools/arm-probe.git For more information about Arm Energy Probe please see https://developer.arm.com/products/software-development-tools/ds-5-development-studio/streamline/arm-energy-probe """ parameters = [ Parameter('config_file', kind=str, description=""" Path to the config file of the AEP """), Parameter('keep_raw', kind=bool, default=False, description=""" If set to ``True``, this will prevent the raw files obtained from the device before processing from being deleted (this is mainly used for debugging). """), ] instrument = ArmEnergyProbeInstrument def get_instruments(self, target, metadir, **kwargs): """ Get a dict mapping device keys to Instrument instances. Typically there is just a single device/instrument, in which case the device key is arbitrary.
""" shutil.copy(self.config_file, metadir) return {None: self.instrument(target, **kwargs)} def validate_parameters(self, params): if not params.get('config_file'): raise ConfigError('Mandatory parameter "config_file" is not set.') self.config_file = params.get('config_file') if not os.path.exists(self.config_file): raise ConfigError('"config_file" does not exist.') class AcmeCapeBackend(EnergyInstrumentBackend): name = 'acme_cape' description = """ BayLibre ACME cape This backend relies on iio-capture utility: https://github.com/BayLibre/iio-capture For more information about ACME cape please see: https://baylibre.com/acme/ """ parameters = [ Parameter('iio-capture', default=which('iio-capture'), description=""" Path to the iio-capture binary will be taken from the environment, if not specfied. """), Parameter('host', default='baylibre-acme.local', description=""" Host name (or IP address) of the ACME cape board. """), Parameter('iio-devices', default='iio:device0', kind=list_or_string, description=""" """), Parameter('buffer-size', kind=int, default=256, description=""" Size of the capture buffer (in KB). """), Parameter('keep_raw', kind=bool, default=False, description=""" If set to ``True``, this will prevent the raw files obtained from the device before processing from being deleted (this is maily used for debugging). """), ] # pylint: disable=arguments-differ def get_instruments(self, target, metadir, iio_capture, host, iio_devices, buffer_size, keep_raw): # # Devlib's ACME instrument uses iio-capture under the hood, which can # only capture data from one IIO device at a time. Devlib's instrument # API expects to produce a single CSV file for the Instrument, with a # single axis of sample timestamps. These two things cannot be correctly # reconciled without changing the devlib Instrument API - get_data would # need to be able to return two distinct sets of data. # # Instead, where required WA will instantiate the ACME instrument # multiple times (once for each IIO device), producing two separate CSV # files. Aggregated energy info _can_ be meaningfully combined from # multiple IIO devices, so we will later sum the derived stats across # each of the channels reported by the instruments. # ret = {} for iio_device in iio_devices: ret[iio_device] = AcmeCapeInstrument( target, iio_capture=iio_capture, host=host, iio_device=iio_device, buffer_size=buffer_size, keep_raw=keep_raw) return ret class MonsoonBackend(EnergyInstrumentBackend): name = 'monsoon' description = """ Monsoon Solutions power monitor To use this instrument, you need to install the monsoon.py script available from the Android Open Source Project. As of May 2017 this is under the CTS repository: https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py Collects power measurements only, from a selection of two channels, the USB passthrough channel and the main output channel. """ parameters = [ Parameter('monsoon_bin', default=which('monsoon.py'), description=""" Path to monsoon.py executable. If not provided, ``PATH`` is searched. """), Parameter('tty_device', default='/dev/ttyACM0', description=""" TTY device to use to communicate with the Power Monitor. If not provided, /dev/ttyACM0 is used. 
""") ] instrument = MonsoonInstrument class JunoEnergyBackend(EnergyInstrumentBackend): name = 'juno_readenergy' description = """ Arm Juno development board on-board energy meters For more information about Arm Juno board see: https://developer.arm.com/products/system-design/development-boards/juno-development-board """ instrument = JunoEnergyInstrument class EnergyMeasurement(Instrument): name = 'energy_measurement' description = """ This instrument is designed to be used as an interface to the various energy measurement instruments located in devlib. This instrument should be used to provide configuration for any of the Energy Instrument Backends rather than specifying configuration directly. """ parameters = [ Parameter('instrument', kind=str, mandatory=True, allowed_values=['daq', 'energy_probe', 'acme_cape', 'monsoon', 'juno_readenergy', 'arm_energy_probe'], description=""" Specify the energy instruments to be enabled. """), Parameter('instrument_parameters', kind=dict, default={}, description=""" Specify the parameters used to initialize the desired instruments. To see parameters available for a particular instrument, run wa show See help for ``instrument`` parameter to see available options for . """), Parameter('sites', kind=list_or_string, description=""" Specify which sites measurements should be collected from, if not specified the measurements will be collected for all available sites. """), Parameter('kinds', kind=list_or_string, description=""" Specify the kinds of measurements should be collected, if not specified measurements will be collected for all available kinds. """), Parameter('channels', kind=list_or_string, description=""" Specify the channels to be collected, if not specified the measurements will be collected for all available channels. """), ] def __init__(self, target, loader=pluginloader, **kwargs): super(EnergyMeasurement, self).__init__(target, **kwargs) self.instruments = None self.measurement_csvs = {} self.loader = loader self.backend = self.loader.get_plugin(self.instrument) self.params = obj_dict() instrument_parameters = {identifier(k): v for k, v in self.instrument_parameters.items()} supported_params = self.backend.get_parameters() for name, param in supported_params.items(): value = instrument_parameters.pop(name, None) param.set_value(self.params, value) if instrument_parameters: msg = 'Unexpected parameters for backend "{}": {}' raise ConfigError(msg.format(self.instrument, instrument_parameters)) self.backend.validate_parameters(self.params) def initialize(self, context): self.instruments = self.backend.get_instruments(self.target, context.run_output.metadir, **self.params) for instrument in self.instruments.values(): if not (instrument.mode & CONTINUOUS): # pylint: disable=superfluous-parens msg = '{} instrument does not support continuous measurement collection' raise ConfigError(msg.format(self.instrument)) instrument.setup() for channel in self.channels or []: # Check that the expeccted channels exist. # If there are multiple Instruments, they were all constructed with # the same channels param, so check them all. 
for instrument in self.instruments.values(): if not instrument.get_channels(channel): raise ConfigError('No channels found for "{}"'.format(channel)) def setup(self, context): for instrument in self.instruments.values(): instrument.reset(sites=self.sites, kinds=self.kinds, channels=self.channels) def start(self, context): for instrument in self.instruments.values(): instrument.start() def stop(self, context): for instrument in self.instruments.values(): instrument.stop() def update_output(self, context): for device, instrument in self.instruments.items(): # Append the device key to the filename and artifact name, unless # it's None (as it will be for backends with only 1 # device/instrument) if len(self.instruments) > 1: name = 'energy_instrument_output_{}'.format(device) else: name = 'energy_instrument_output' outfile = os.path.join(context.output_directory, '{}.csv'.format(name)) measurements = instrument.get_data(outfile) if not measurements: raise InstrumentError("Failed to collect energy data from {}" .format(self.backend.name)) self.measurement_csvs[device] = measurements context.add_artifact(name, measurements.path, 'data', classifiers={'device': device}) self.extract_metrics(context) def extract_metrics(self, context): metrics_by_name = defaultdict(list) for device in self.instruments: csv = self.measurement_csvs[device] derived_measurements = DerivedEnergyMeasurements.process(csv) for meas in derived_measurements: # Append the device key to the metric name, unless it's None (as # it will be for backends with only 1 device/instrument) if len(self.instruments) > 1: metric_name = '{}_{}'.format(meas.name, device) else: metric_name = meas.name context.add_metric(metric_name, meas.value, meas.units, classifiers={'device': device}) metrics_by_name[meas.name].append(meas) # Where we have multiple instruments, add up all the metrics with the # same name. For instance with ACME we may have multiple IIO devices # each reporting 'device_energy' and 'device_power', so sum them up to # produce aggregated energy and power metrics. # (Note that metrics_by_name uses the metric name originally reported by # the devlib instrument, before we potentially appended a device key to # it) if len(self.instruments) > 1: for name, metrics in metrics_by_name.items(): units = metrics[0].units value = sum(m.value for m in metrics) context.add_metric(name, value, units) def teardown(self, context): for instrument in self.instruments.values(): instrument.teardown() ================================================ FILE: wa/instruments/fps.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
# import os import shutil from devlib import SurfaceFlingerFramesInstrument, GfxInfoFramesInstrument from devlib import DerivedSurfaceFlingerStats, DerivedGfxInfoStats from wa import Instrument, Parameter, WorkloadError from wa.utils.types import numeric class FpsInstrument(Instrument): name = 'fps' description = """ Measures Frames Per Second (FPS) and associated metrics for a workload. .. note:: This instrument depends on the pandas Python library (which is not part of standard WA dependencies), so you will need to install that first, before you can use it. Android L and below use SurfaceFlinger to calculate the FPS data. Android M and above use gfxinfo to calculate the FPS data. SurfaceFlinger: The view is specified by the workload as ``view`` attribute. This defaults to ``'SurfaceView'`` for game workloads, and ``None`` for non-game workloads (as for them FPS measurement usually doesn't make sense). Individual workloads may override this. gfxinfo: The view is specified by the workload as ``package`` attribute. This is because gfxinfo already processes for all views in a package. """ parameters = [ Parameter('drop_threshold', kind=numeric, default=5, description=""" Data points below this FPS will be dropped as they do not constitute "real" gameplay. The assumption being that while actually running, the FPS in the game will not drop below X frames per second, except on loading screens, menus, etc, which should not contribute to FPS calculation. """), Parameter('keep_raw', kind=bool, default=False, description=""" If set to ``True``, this will keep the raw dumpsys output in the results directory (this is mainly used for debugging). Note: frames.csv with the collected frames data will always be generated regardless of this setting. """), Parameter('crash_threshold', kind=float, default=0.7, description=""" Specifies the threshold used to decide whether a measured/expected frames ratio indicates a content crash. E.g. a value of ``0.75`` means that if the number of actual frames counted is more than a quarter lower than expected, it will be treated as a content crash. If set to zero, no crash check will be performed. """), Parameter('period', kind=float, default=2, constraint=lambda x: x > 0, description=""" Specifies the time period between polling frame data in seconds when collecting frame data. Using a lower value improves the granularity of timings when recording actions that take a short time to complete. Note, this will produce duplicate frame data in the raw dumpsys output, however, this is filtered out in frames.csv. It may also affect the overall load on the system. The default value of 2 seconds corresponds with the NUM_FRAME_RECORDS in android/services/surfaceflinger/FrameTracker.h (as of the time of writing currently 128) and a frame rate of 60 fps that is applicable to most devices. """), Parameter('force_surfaceflinger', kind=bool, default=False, description=""" By default, the method used to capture fps data is based on the Android version. If this is set to true, force the instrument to use the SurfaceFlinger method regardless of the device's Android version.
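For instance, to force the legacy SurfaceFlinger collection path on a newer device, an agenda fragment along the following lines could be used (a sketch, not taken from the source)::

    config:
        augmentations:
            - fps
        fps:
            force_surfaceflinger: true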
"""), ] def __init__(self, target, **kwargs): super(FpsInstrument, self).__init__(target, **kwargs) self.collector = None self.processor = None self._is_enabled = None def setup(self, context): use_gfxinfo = self.target.get_sdk_version() >= 23 and not self.force_surfaceflinger if use_gfxinfo: collector_target_attr = 'package' else: collector_target_attr = 'view' collector_target = getattr(context.workload, collector_target_attr, None) if not collector_target: self._is_enabled = False msg = 'Workload {} does not define a {}; disabling frame collection and FPS evaluation.' self.logger.info(msg.format(context.workload.name, collector_target_attr)) return self._is_enabled = True if use_gfxinfo: self.collector = GfxInfoFramesInstrument(self.target, collector_target, self.period) self.processor = DerivedGfxInfoStats(self.drop_threshold, filename='fps.csv') else: self.collector = SurfaceFlingerFramesInstrument(self.target, collector_target, self.period) self.processor = DerivedSurfaceFlingerStats(self.drop_threshold, filename='fps.csv') self.collector.reset() def start(self, context): # pylint: disable=unused-argument if not self._is_enabled: return self.collector.start() def stop(self, context): # pylint: disable=unused-argument if not self._is_enabled: return self.collector.stop() def update_output(self, context): if not self._is_enabled: return outpath = os.path.join(context.output_directory, 'frames.csv') frames_csv = self.collector.get_data(outpath) raw_output = self.collector.get_raw() processed = self.processor.process(frames_csv) processed.extend(self.processor.process_raw(*raw_output)) fps, frame_count, fps_csv = processed[:3] rest = processed[3:] context.add_metric(fps.name, fps.value, fps.units) context.add_metric(frame_count.name, frame_count.value, frame_count.units) context.add_artifact('frames', frames_csv.path, kind='raw') context.add_artifact('fps', fps_csv.path, kind='data') for metric in rest: context.add_metric(metric.name, metric.value, metric.units, lower_is_better=True) if not self.keep_raw: for entry in raw_output: if os.path.isdir(entry): shutil.rmtree(entry) elif os.path.isfile(entry): os.remove(entry) if not frame_count.value: context.add_event('Could not find frames data in gfxinfo output') context.set_status('PARTIAL') self.check_for_crash(context, fps.value, frame_count.value, context.current_job.run_time.total_seconds()) def check_for_crash(self, context, fps, frames, exec_time): if not self.crash_threshold: return self.logger.debug('Checking for crashed content.') if all([exec_time, fps, frames]): expected_frames = fps * exec_time ratio = frames / expected_frames self.logger.debug('actual/expected frames: {:.2}'.format(ratio)) if ratio < self.crash_threshold: msg = 'Content for {} appears to have crashed.\n'.format(context.current_job.spec.label) msg += 'Content crash detected (actual/expected frames: {:.2}).'.format(ratio) raise WorkloadError(msg) ================================================ FILE: wa/instruments/hwmon.py ================================================ # Copyright 2017-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # from devlib import HwmonInstrument as _Instrument from wa import Instrument from wa.framework.instrument import fast MOMENTARY_QUANTITIES = ['temperature', 'power', 'voltage', 'current', 'fps'] CUMULATIVE_QUANTITIES = ['energy', 'tx', 'tx/rx', 'frames'] class HwmonInstrument(Instrument): name = 'hwmon' description = """ Hardware Monitor (hwmon) is a generic Linux kernel subsystem, providing access to hardware monitoring components like temperature or voltage/current sensors. Data from hwmon that are a snapshot of a fluctuating value, such as temperature and voltage, are reported once at the beginning and once at the end of the workload run. Data that are a cumulative total of a quantity, such as energy (which is the cumulative total of power consumption), are reported as the difference between the values at the beginning and at the end of the workload run. There is currently no functionality to filter sensors: all of the available hwmon data will be reported. """ def initialize(self, context): # pylint: disable=unused-argument self.instrument = _Instrument(self.target) def setup(self, context): # pylint: disable=unused-argument self.instrument.reset() @fast def start(self, context): # pylint: disable=unused-argument self.before = self.instrument.take_measurement() @fast def stop(self, context): # pylint: disable=unused-argument self.after = self.instrument.take_measurement() def update_output(self, context): measurements_before = {m.channel.label: m for m in self.before} measurements_after = {m.channel.label: m for m in self.after} if list(measurements_before.keys()) != list(measurements_after.keys()): self.logger.warning( 'hwmon before/after measurements returned different entries!') for label, measurement_after in measurements_after.items(): if label not in measurements_before: continue # We've already warned about this measurement_before = measurements_before[label] if measurement_after.channel.kind in MOMENTARY_QUANTITIES: context.add_metric('{}_before'.format(label), measurement_before.value, measurement_before.channel.units) context.add_metric('{}_after'.format(label), measurement_after.value, measurement_after.channel.units) elif measurement_after.channel.kind in CUMULATIVE_QUANTITIES: diff = measurement_after.value - measurement_before.value context.add_metric(label, diff, measurement_after.channel.units) else: self.logger.warning( "Don't know what to do with hwmon channel '{}'" .format(measurement_after.channel)) def teardown(self, context): # pylint: disable=unused-argument self.instrument.teardown() ================================================ FILE: wa/instruments/misc.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=W0613,no-member,attribute-defined-outside-init """ Some "standard" instruments to collect additional info about workload execution. .. 
note:: The run() method of a Workload may perform some "boilerplate" as well as the actual execution of the workload (e.g. it may contain UI automation needed to start the workload). This "boilerplate" execution will also be measured by these instruments. As such, they are not suitable for collecting precise data about specific operations. """ import os import logging import time import tarfile from subprocess import CalledProcessError from devlib.exception import TargetError from wa import Instrument, Parameter, very_fast from wa.framework.exception import ConfigError from wa.framework.instrument import slow from wa.utils.diff import diff_sysfs_dirs, diff_interrupt_files from wa.utils.misc import as_relative, safe_extract from wa.utils.misc import ensure_file_directory_exists as _f from wa.utils.misc import ensure_directory_exists as _d from wa.utils.types import list_of_strings from wa.utils.android import get_cacheable_apk_info logger = logging.getLogger(__name__) class SysfsExtractor(Instrument): name = 'sysfs_extractor' description = """ Collects the contents of a set of directories, before and after workload execution, and diffs the result. """ mount_command = 'mount -t tmpfs -o size={} tmpfs {}' extract_timeout = 30 tarname = 'sysfs.tar.gz' DEVICE_PATH = 0 BEFORE_PATH = 1 AFTER_PATH = 2 DIFF_PATH = 3 parameters = [ Parameter('paths', kind=list_of_strings, mandatory=True, description="""A list of paths to be pulled from the device. These could be directories as well as files.""", global_alias='sysfs_extract_dirs'), Parameter('use_tmpfs', kind=bool, default=None, description=""" Specifies whether tmpfs should be used to cache sysfile trees and then pull them down as a tarball. This is significantly faster than just copying the directory trees from the device directly, but requires root and may not work on all devices. Defaults to ``True`` if the device is rooted and ``False`` if it is not.
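For example, on an unrooted device the tmpfs caching has to stay disabled; a hypothetical config fragment (the paths are illustrative)::

    config:
        augmentations:
            - sysfs_extractor
        sysfs_extractor:
            paths: [/sys/devices/system/cpu]
            use_tmpfs: false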
"""), Parameter('tmpfs_mount_point', default=None, description="""Mount point for tmpfs partition used to store snapshots of paths."""), Parameter('tmpfs_size', default='32m', description="""Size of the tempfs partition."""), ] def initialize(self, context): if not self.target.is_rooted and self.use_tmpfs: # pylint: disable=access-member-before-definition raise ConfigError('use_tempfs must be False for an unrooted device.') elif self.use_tmpfs is None: # pylint: disable=access-member-before-definition self.use_tmpfs = self.target.is_rooted if self.use_tmpfs: self.on_device_before = self.target.path.join(self.tmpfs_mount_point, 'before') self.on_device_after = self.target.path.join(self.tmpfs_mount_point, 'after') if not self.target.file_exists(self.tmpfs_mount_point): self.target.execute('mkdir -p {}'.format(self.tmpfs_mount_point), as_root=True) self.target.execute(self.mount_command.format(self.tmpfs_size, self.tmpfs_mount_point), as_root=True) def setup(self, context): before_dirs = [ _d(os.path.join(context.output_directory, 'before', self._local_dir(d))) for d in self.paths ] after_dirs = [ _d(os.path.join(context.output_directory, 'after', self._local_dir(d))) for d in self.paths ] diff_dirs = [ _d(os.path.join(context.output_directory, 'diff', self._local_dir(d))) for d in self.paths ] self.device_and_host_paths = list(zip(self.paths, before_dirs, after_dirs, diff_dirs)) if self.use_tmpfs: for d in self.paths: before_dir = self.target.path.join(self.on_device_before, self.target.path.dirname(as_relative(d))) after_dir = self.target.path.join(self.on_device_after, self.target.path.dirname(as_relative(d))) if self.target.file_exists(before_dir): self.target.execute('rm -rf {}'.format(before_dir), as_root=True) self.target.execute('mkdir -p {}'.format(before_dir), as_root=True) if self.target.file_exists(after_dir): self.target.execute('rm -rf {}'.format(after_dir), as_root=True) self.target.execute('mkdir -p {}'.format(after_dir), as_root=True) @slow def start(self, context): if self.use_tmpfs: for d in self.paths: dest_dir = self.target.path.join(self.on_device_before, as_relative(d)) if '*' in dest_dir: dest_dir = self.target.path.dirname(dest_dir) self.target.execute('{} cp -Hr {} {}'.format(self.target.busybox, d, dest_dir), as_root=True, check_exit_code=False) else: # not rooted for dev_dir, before_dir, _, _ in self.device_and_host_paths: self.target.pull(dev_dir, before_dir) @slow def stop(self, context): if self.use_tmpfs: for d in self.paths: dest_dir = self.target.path.join(self.on_device_after, as_relative(d)) if '*' in dest_dir: dest_dir = self.target.path.dirname(dest_dir) self.target.execute('{} cp -Hr {} {}'.format(self.target.busybox, d, dest_dir), as_root=True, check_exit_code=False) else: # not using tmpfs for dev_dir, _, after_dir, _ in self.device_and_host_paths: self.target.pull(dev_dir, after_dir) def update_output(self, context): if self.use_tmpfs: on_device_tarball = self.target.path.join(self.target.working_directory, self.tarname) on_host_tarball = self.target.path.join(context.output_directory, self.tarname) self.target.execute('{} tar czf {} -C {} .'.format(self.target.busybox, on_device_tarball, self.tmpfs_mount_point), as_root=True) self.target.execute('chmod 0777 {}'.format(on_device_tarball), as_root=True) self.target.pull(on_device_tarball, on_host_tarball) with tarfile.open(on_host_tarball, 'r:gz') as tf: safe_extract(tf, context.output_directory) self.target.remove(on_device_tarball) os.remove(on_host_tarball) for paths in self.device_and_host_paths: 
after_dir = paths[self.AFTER_PATH] dev_dir = paths[self.DEVICE_PATH].strip('*') # remove potential trailing '*' if (not os.listdir(after_dir) and self.target.file_exists(dev_dir) and self.target.list_directory(dev_dir)): self.logger.error('sysfs files were not pulled from the device.') self.device_and_host_paths.remove(paths) # Path is removed to skip diffing it for dev_dir, before_dir, after_dir, diff_dir in self.device_and_host_paths: diff_sysfs_dirs(before_dir, after_dir, diff_dir) context.add_artifact('{} [before]'.format(dev_dir), before_dir, kind='data', classifiers={'stage': 'before'}) context.add_artifact('{} [after]'.format(dev_dir), after_dir, kind='data', classifiers={'stage': 'after'}) context.add_artifact('{} [diff]'.format(dev_dir), diff_dir, kind='data', classifiers={'stage': 'diff'}) def teardown(self, context): self._one_time_setup_done = [] def finalize(self, context): if self.use_tmpfs: try: self.target.execute('umount {}'.format(self.tmpfs_mount_point), as_root=True) except (TargetError, CalledProcessError): # assume a directory but not mount point pass self.target.execute('rm -rf {}'.format(self.tmpfs_mount_point), as_root=True, check_exit_code=False) def validate(self): if not self.tmpfs_mount_point: # pylint: disable=access-member-before-definition self.tmpfs_mount_point = self.target.get_workpath('temp-fs') def _local_dir(self, directory): return os.path.dirname(as_relative(directory).replace(self.target.path.sep, os.sep)) class ExecutionTimeInstrument(Instrument): name = 'execution_time' description = """ Measure how long it took to execute the run() methods of a Workload. """ def __init__(self, target, **kwargs): super(ExecutionTimeInstrument, self).__init__(target, **kwargs) self.start_time = None self.end_time = None @very_fast def start(self, context): self.start_time = time.time() @very_fast def stop(self, context): self.end_time = time.time() def update_output(self, context): execution_time = self.end_time - self.start_time context.add_metric('execution_time', execution_time, 'seconds') class ApkVersion(Instrument): name = 'apk_version' description = """ Extracts APK versions for workloads that have them. """ def __init__(self, device, **kwargs): super(ApkVersion, self).__init__(device, **kwargs) self.apk_info = None def setup(self, context): if hasattr(context.workload, 'apk_file'): self.apk_info = get_cacheable_apk_info(context.workload.apk_file) else: self.apk_info = None def update_output(self, context): if self.apk_info: context.result.add_metric(self.name, self.apk_info.version_name) class InterruptStatsInstrument(Instrument): name = 'interrupts' description = """ Pulls the ``/proc/interrupts`` file before and after workload execution and diffs them to show what interrupts occurred during that time. 
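Per the ``setup()`` method below, the snapshots are written under the job's output directory as ``before/proc/interrupts``, ``after/proc/interrupts`` and ``diff/proc/interrupts``.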
""" def __init__(self, target, **kwargs): super(InterruptStatsInstrument, self).__init__(target, **kwargs) self.before_file = None self.after_file = None self.diff_file = None def setup(self, context): self.before_file = os.path.join(context.output_directory, 'before', 'proc', 'interrupts') self.after_file = os.path.join(context.output_directory, 'after', 'proc', 'interrupts') self.diff_file = os.path.join(context.output_directory, 'diff', 'proc', 'interrupts') def start(self, context): with open(_f(self.before_file), 'w') as wfh: wfh.write(self.target.execute('cat /proc/interrupts')) def stop(self, context): with open(_f(self.after_file), 'w') as wfh: wfh.write(self.target.execute('cat /proc/interrupts')) def update_output(self, context): context.add_artifact('interrupts [before]', self.before_file, kind='data', classifiers={'stage': 'before'}) # If workload execution failed, the after_file may not have been created. if os.path.isfile(self.after_file): diff_interrupt_files(self.before_file, self.after_file, _f(self.diff_file)) context.add_artifact('interrupts [after]', self.after_file, kind='data', classifiers={'stage': 'after'}) context.add_artifact('interrupts [diff]', self.diff_file, kind='data', classifiers={'stage': 'diff'}) class DynamicFrequencyInstrument(SysfsExtractor): name = 'cpufreq' description = """ Collects dynamic frequency (DVFS) settings before and after workload execution. """ tarname = 'cpufreq.tar.gz' parameters = [ Parameter('paths', mandatory=False, override=True), ] def setup(self, context): self.paths = ['/sys/devices/system/cpu'] if self.use_tmpfs: self.paths.append('/sys/class/devfreq/*') # the '*' would cause problems for adb pull. super(DynamicFrequencyInstrument, self).setup(context) def validate(self): super(DynamicFrequencyInstrument, self).validate() if not self.tmpfs_mount_point.endswith('-cpufreq'): # pylint: disable=access-member-before-definition self.tmpfs_mount_point += '-cpufreq' ================================================ FILE: wa/instruments/perf.py ================================================ # Copyright 2013-2015 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=unused-argument import csv import os import re from devlib.collector.perf import PerfCollector from wa import Instrument, Parameter, ConfigError from wa.utils.types import list_or_string, list_of_strs, numeric PERF_COUNT_REGEX = re.compile(r'^(CPU\d+)?\s*(\d+)\s*(.*?)\s*(\[\s*\d+\.\d+%\s*\])?\s*$') class PerfInstrument(Instrument): name = 'perf' description = """ Perf is a Linux profiling tool with performance counters. Simpleperf is an Android profiling tool with performance counters. It is highly recomended to use perf_type = simpleperf when using this instrument on android devices since it recognises android symbols in record mode and is much more stable when reporting record .data files. 
For more information see simpleperf documentation at: https://android.googlesource.com/platform/system/extras/+/master/simpleperf/doc/README.md Performance counters are CPU hardware registers that count hardware events such as instructions executed, cache-misses suffered, or branches mispredicted. They form a basis for profiling applications to trace dynamic control flow and identify hotspots. perf accepts options and events. If no option is given the default '-a' is used. For events, the default events for perf are migrations and cs. The default events for simpleperf are raw-cpu-cycles, raw-l1-dcache, raw-l1-dcache-refill, raw-instructions-retired. They both can be specified in the config file. Events must be provided as a list that contains them and they will look like this :: (for perf_type = perf ) perf_events = ['migrations', 'cs'] (for perf_type = simpleperf) perf_events = ['raw-cpu-cycles', 'raw-l1-dcache'] Events can be obtained by typing the following in the command line on the device :: perf list simpleperf list Whereas options, they can be provided as a single string as following :: perf_options = '-a -i' perf_options = '--app com.adobe.reader' Options can be obtained by running the following in the command line :: man perf-stat """ parameters = [ Parameter('perf_type', kind=str, allowed_values=['perf', 'simpleperf'], default='perf', global_alias='perf_type', description="""Specifies which type of perf binaries to install. Use simpleperf for collecting perf data on android systems."""), Parameter('command', kind=str, default='stat', allowed_values=['stat', 'record'], global_alias='perf_command', description="""Specifies which perf command to use. If in record mode report command will also be executed and results pulled from target along with raw data file"""), Parameter('events', kind=list_of_strs, global_alias='perf_events', description="""Specifies the events to be counted."""), Parameter('optionstring', kind=list_or_string, default='-a', global_alias='perf_options', description="""Specifies options to be used for the perf command. This may be a list of option strings, in which case, multiple instances of perf will be kicked off -- one for each option string. This may be used to e.g. collected different events from different big.LITTLE clusters. In order to profile a particular application process for android with simpleperf use the --app option e.g. --app com.adobe.reader """), Parameter('report_option_string', kind=str, global_alias='perf_report_options', default=None, description="""Specifies options to be used to gather report when record command is used. It's highly recommended to use perf_type simpleperf when running on android devices as reporting options are unstable with perf"""), Parameter('run_report_sample', kind=bool, default=False, description="""If true, run 'perf/simpleperf report-sample'. It only works with the record command."""), Parameter('report_sample_options', kind=str, default=None, description="""Specifies options to pass to report-samples when run_report_sample is true."""), Parameter('labels', kind=list_of_strs, default=None, global_alias='perf_labels', description="""Provides labels for perf/simpleperf output for each optionstring. If specified, the number of labels must match the number of ``optionstring``\ s. """), Parameter('force_install', kind=bool, default=False, description=""" always install perf binary even if perf is already present on the device. 
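As a concrete illustration, a sketch of an agenda fragment that records simpleperf data for a single app (the package name is illustrative, not taken from the source)::

    config:
        augmentations:
            - perf
        perf:
            perf_type: simpleperf
            command: record
            optionstring: '--app com.example.app'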
"""), Parameter('validate_pmu_events', kind=bool, default=True, description=""" Query the hardware capabilities to verify the specified PMU events. """), ] def __init__(self, target, **kwargs): super(PerfInstrument, self).__init__(target, **kwargs) self.collector = None self.outdir = None def validate(self): if self.report_option_string and (self.command != "record"): raise ConfigError("report_option_string only works with perf/simpleperf record. Set command to record or remove report_option_string") if self.report_sample_options and (self.command != "record"): raise ConfigError("report_sample_options only works with perf/simpleperf record. Set command to record or remove report_sample_options") if self.run_report_sample and (self.command != "record"): raise ConfigError("run_report_sample only works with perf/simpleperf record. Set command to record or remove run_report_sample") def initialize(self, context): if self.report_sample_options: self.run_report_sample = True self.collector = PerfCollector(self.target, self.perf_type, self.command, self.events, self.optionstring, self.report_option_string, self.run_report_sample, self.report_sample_options, self.labels, self.force_install, self.validate_pmu_events) def setup(self, context): self.outdir = os.path.join(context.output_directory, self.perf_type) self.collector.set_output(self.outdir) self.collector.reset() def start(self, context): self.collector.start() def stop(self, context): self.collector.stop() def update_output(self, context): self.logger.info('Extracting reports from target...') self.collector.get_data() if self.perf_type == 'perf': self._process_perf_output(context) else: self._process_simpleperf_output(context) def teardown(self, context): self.collector.reset() def _process_perf_output(self, context): if self.command == 'stat': self._process_perf_stat_output(context) elif self.command == 'record': self._process_perf_record_output(context) def _process_simpleperf_output(self, context): if self.command == 'stat': self._process_simpleperf_stat_output(context) elif self.command == 'record': self._process_simpleperf_record_output(context) def _process_perf_stat_output(self, context): for host_file in os.listdir(self.outdir): label = host_file.split('.out')[0] host_file_path = os.path.join(self.outdir, host_file) context.add_artifact(label, host_file_path, 'raw') with open(host_file_path) as fh: in_results_section = False for line in fh: if 'Performance counter stats' in line: in_results_section = True next(fh) # skip the following blank line if not in_results_section: continue if not line.strip(): # blank line in_results_section = False break else: self._add_perf_stat_metric(line, label, context) @staticmethod def _add_perf_stat_metric(line, label, context): line = line.split('#')[0] # comment match = PERF_COUNT_REGEX.search(line) if not match: return classifiers = {} cpu = match.group(1) if cpu is not None: classifiers['cpu'] = int(cpu.replace('CPU', '')) count = int(match.group(2)) metric = '{}_{}'.format(label, match.group(3)) context.add_metric(metric, count, classifiers=classifiers) def _process_perf_record_output(self, context): for host_file in os.listdir(self.outdir): label, ext = os.path.splitext(host_file) context.add_artifact(label, os.path.join(self.outdir, host_file), 'raw') column_headers = [] column_header_indeces = [] event_type = '' if ext == '.rpt': with open(os.path.join(self.outdir, host_file)) as fh: for line in fh: words = line.split() if not words: continue event_type = self._get_report_event_type(words, 
                                                                 event_type)
                        column_headers = self._get_report_column_headers(column_headers, words, 'perf')
                        for column_header in column_headers:
                            column_header_indeces.append(line.find(column_header))
                        self._add_report_metric(column_headers,
                                                column_header_indeces,
                                                line, words, context,
                                                event_type, label)

    @staticmethod
    def _get_report_event_type(words, event_type):
        if words[0] != '#':
            return event_type
        if len(words) == 6 and words[4] == 'event':
            event_type = words[5]
            event_type = event_type.strip("'")
        return event_type

    def _process_simpleperf_stat_output(self, context):
        labels = []
        for host_file in os.listdir(self.outdir):
            labels.append(host_file.split('.out')[0])
        for opts, label in zip(self.optionstring, labels):
            stat_file = os.path.join(self.outdir, '{}{}'.format(label, '.out'))
            if '--csv' in opts:
                self._process_simpleperf_stat_from_csv(stat_file, context, label)
            else:
                self._process_simpleperf_stat_from_raw(stat_file, context, label)

    @staticmethod
    def _process_simpleperf_stat_from_csv(stat_file, context, label):
        with open(stat_file) as csv_file:
            readCSV = csv.reader(csv_file, delimiter=',')
            for row in readCSV:
                if 'Performance counter statistics' not in row and 'Total test time' not in row:
                    classifiers = {}
                    if '%' in row:
                        classifiers['scaled from(%)'] = row[len(row) - 2].replace('(', '').replace(')', '').replace('%', '')
                    context.add_metric('{}_{}'.format(label, row[1]), row[0],
                                       'count', classifiers=classifiers)

    @staticmethod
    def _process_simpleperf_stat_from_raw(stat_file, context, label):
        with open(stat_file) as fh:
            for line in fh:
                if '#' in line and not line.startswith('#'):
                    units = 'count'
                    if "(ms)" in line:
                        line = line.replace("(ms)", "")
                        units = 'ms'
                    tmp_line = line.split('#')[0]
                    tmp_line = tmp_line.strip()  # strip the comment-free part, not the original line
                    count, metric = tmp_line.split(' ')[0], tmp_line.split(' ')[2]
                    count = float(count) if "." in count else int(count.replace(',', ''))
                    classifiers = {}
                    if '%' in line:
                        scaled_percentage = line.split('(')[1].strip().replace(')', '').replace('%', '')
                        classifiers['scaled from(%)'] = int(scaled_percentage)
                    metric = '{}_{}'.format(label, metric)
                    context.add_metric(metric, count, units, classifiers=classifiers)

    def _process_simpleperf_record_output(self, context):
        for host_file in os.listdir(self.outdir):
            label, ext = os.path.splitext(host_file)
            context.add_artifact(label, os.path.join(self.outdir, host_file), 'raw')
            if ext != '.rpt':
                continue
            column_headers = []
            column_header_indeces = []
            event_type = ''
            with open(os.path.join(self.outdir, host_file)) as fh:
                for line in fh:
                    words = line.split()
                    if not words:
                        continue
                    if words[0] == 'Event:':
                        event_type = words[1]
                    column_headers = self._get_report_column_headers(column_headers, words, 'simpleperf')
                    for column_header in column_headers:
                        column_header_indeces.append(line.find(column_header))
                    self._add_report_metric(column_headers,
                                            column_header_indeces,
                                            line, words, context,
                                            event_type, label)

    @staticmethod
    def _get_report_column_headers(column_headers, words, perf_type):
        if 'Overhead' not in words:
            return column_headers
        if perf_type == 'perf':
            words.remove('#')
        column_headers = words
        # Concatenate the 'Shared Objects' header
        if 'Shared' in column_headers:
            shared_index = column_headers.index('Shared')
            column_headers[shared_index:shared_index + 2] = ['{} {}'.format(column_headers[shared_index],
                                                                            column_headers[shared_index + 1])]
        return column_headers

    @staticmethod
    def _add_report_metric(column_headers, column_header_indeces, line, words, context, event_type, label):
        if '%' not in words[0]:
            return
        classifiers = {}
        for i in range(1, len(column_headers)):
            classifiers[column_headers[i]] = line[column_header_indeces[i]:column_header_indeces[i + 1]].strip()

        context.add_metric('{}_{}_Overhead'.format(label, event_type),
                           numeric(words[0].strip('%')),
                           'percent',
                           classifiers=classifiers)
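A standalone sketch of the kind of parsing `_add_perf_stat_metric` performs on a `perf stat` output line may help; note that the regex below is an illustrative approximation, not the module's actual `PERF_COUNT_REGEX`:

# Illustrative only: turn a 'perf stat' line such as
#   "CPU0      1,483,338,867      cpu-cycles"
# into a (cpu, count, event) triple. The pattern is an assumption.
import re

EXAMPLE_REGEX = re.compile(r'(CPU\d+)?\s*([\d,]+)\s+(\S+)')

def parse_stat_line(line):
    line = line.split('#')[0]          # drop trailing comments
    match = EXAMPLE_REGEX.search(line)
    if not match:
        return None
    cpu = int(match.group(1).replace('CPU', '')) if match.group(1) else None
    count = int(match.group(2).replace(',', ''))
    return cpu, count, match.group(3)

print(parse_stat_line('CPU0      1,483,338,867      cpu-cycles'))
# -> (0, 1483338867, 'cpu-cycles')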
================================================
FILE: wa/instruments/perfetto.py
================================================
# Copyright 2023 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

from devlib import PerfettoCollector

from wa import Instrument, Parameter
from wa.framework.instrument import very_slow, is_installed
from wa.framework.exception import InstrumentError

OUTPUT_PERFETTO_TRACE = 'devlib-trace.perfetto-trace'
PERFETTO_CONFIG_FILE = 'config.pbtx'


class PerfettoInstrument(Instrument):

    name = 'perfetto'
    description = """
    perfetto is an instrument that interacts with Google's Perfetto tracing
    infrastructure.

    From Perfetto's website:

    Perfetto is a production-grade open-source stack for performance
    instrumentation and trace analysis. It offers services and libraries for
    recording system-level and app-level traces, native + java heap profiling,
    a library for analyzing traces using SQL and a web-based UI to visualize
    and explore multi-GB traces.

    The instrument either requires Perfetto to be present on the target device
    or the standalone tracebox binary to be built from source and included in
    devlib's Package Bin directory. For more information, consult the
    PerfettoCollector documentation in devlib.

    More information can be found on https://perfetto.dev/
    """

    parameters = [
        Parameter('config', kind=str, mandatory=True,
                  description="""
                  Path to the Perfetto trace config file. All the
                  Perfetto-specific tracing configuration should be done inside
                  that file. This config option should just take a full
                  filesystem path to where the config can be found.
                  """),
        Parameter('force_tracebox', kind=bool, default=False,
                  description="""
                  Install tracebox even if traced is already running on the
                  target device. If set to true, the tracebox binary needs to
                  be placed in devlib's Package Bin directory.
                  """)
    ]

    def __init__(self, target, **kwargs):
        super(PerfettoInstrument, self).__init__(target, **kwargs)
        self.collector = None

    def initialize(self, context):  # pylint: disable=unused-argument
        self.target_config = self.target.path.join(self.target.working_directory,
                                                   PERFETTO_CONFIG_FILE)
        # push the config file to target
        self.target.push(self.config, self.target_config)
        collector_params = dict(
            config=self.target_config,
            force_tracebox=self.force_tracebox
        )
        self.collector = PerfettoCollector(self.target, **collector_params)

    @very_slow
    def start(self, context):  # pylint: disable=unused-argument
        self.collector.start()

    @very_slow
    def stop(self, context):  # pylint: disable=unused-argument
        self.collector.stop()

    def update_output(self, context):
        self.logger.info('Extracting Perfetto trace from target...')
        outfile = os.path.join(context.output_directory, OUTPUT_PERFETTO_TRACE)
        self.collector.set_output(outfile)
        self.collector.get_data()
        context.add_artifact('perfetto-bin', outfile, 'data')

    def teardown(self, context):  # pylint: disable=unused-argument
        self.target.remove(self.collector.target_output_file)

    def finalize(self, context):  # pylint: disable=unused-argument
        self.target.remove(self.target_config)

    def validate(self):
        if is_installed('trace-cmd'):
            raise InstrumentError('perfetto cannot be used at the same time as trace-cmd')
        if not os.path.isfile(self.config):
            raise InstrumentError('perfetto config file not found at "{}"'.format(self.config))


================================================
FILE: wa/instruments/poller/Makefile
================================================
# CROSS_COMPILE=aarch64-linux-gnu- make
#
CC=gcc

ifdef DEBUG
CFLAGS=-static -lc -g
else
CFLAGS=-static -s -lc -O2
endif

poller: poller.c
	$(CROSS_COMPILE)$(CC) $(CFLAGS) poller.c -o poller

clean:
	rm -rf poller

.PHONY: clean


================================================
FILE: wa/instruments/poller/__init__.py
================================================
# Copyright 2015-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=access-member-before-definition,attribute-defined-outside-init,unused-argument
import os

import pandas as pd

from wa import Instrument, Parameter, Executable
from wa.framework import signal
from wa.framework.exception import ConfigError, InstrumentError
from wa.utils.trace_cmd import TraceCmdParser
from wa.utils.types import list_or_string


class FilePoller(Instrument):

    name = 'file_poller'
    description = """
    Polls the given files at a set sample interval. The values are output in
    CSV format.

    This instrument places a file called poller.csv in each iteration's result
    directory. This file will contain a timestamp column (in seconds), and the
    rest of the columns will be the contents of the polled files at that time.

    This instrument will strip any commas or new lines from the files' values
    before writing them.
    """

    parameters = [
        Parameter('sample_interval', kind=int, default=1000,
                  description="""The interval between samples in mS."""),
        Parameter('files', kind=list_or_string, mandatory=True,
                  description="""A list of paths to the files to be polled"""),
        Parameter('labels', kind=list_or_string,
                  description="""
                  A list of labels to be used in the CSV output for the
                  corresponding files. This cannot be used if a `*` wildcard is
                  used in a path.
                  """),
        Parameter('align_with_ftrace', kind=bool, default=False,
                  description="""
                  Insert a marker into ftrace that aligns with the first
                  timestamp. During output processing, extract the marker and
                  use its timestamp to adjust the timestamps in the collected
                  csv so that they align with ftrace.
                  """),
        Parameter('as_root', kind=bool, default=False,
                  description="""
                  Whether or not the poller will be run as root. This should be
                  used when the file you need to poll can only be accessed by
                  root.
                  """),
        Parameter('reopen', kind=bool, default=False,
                  description="""
                  When enabled, files will be re-opened with each read. This is
                  useful for some sysfs/debugfs entries that only generate a
                  value when opened.
"""), ] def validate(self): if not self.files: raise ConfigError('You must specify atleast one file to poll') if self.labels and any(['*' in f for f in self.files]): raise ConfigError('You cannot used manual labels with `*` wildcards') def initialize(self, context): if not self.target.is_rooted and self.as_root: raise ConfigError('The target is not rooted, cannot run poller as root.') host_poller = context.get_resource(Executable(self, self.target.abi, "poller")) target_poller = self.target.install(host_poller) expanded_paths = [] for path in self.files: if "*" in path: for p in self.target.list_directory(path): expanded_paths.append(p) else: expanded_paths.append(path) self.files = expanded_paths if not self.labels: self.labels = self._generate_labels() self.target_output_path = self.target.path.join(self.target.working_directory, 'poller.csv') self.target_log_path = self.target.path.join(self.target.working_directory, 'poller.log') marker_option = '' if self.align_with_ftrace: marker_option = '-m' signal.connect(self._adjust_timestamps, signal.AFTER_JOB_OUTPUT_PROCESSED) reopen_option = '' if self.reopen: reopen_option = '-r' self.command = '{} {} -t {} {} -l {} {} > {} 2>{}'.format(target_poller, reopen_option, self.sample_interval * 1000, marker_option, ','.join(self.labels), ' '.join(self.files), self.target_output_path, self.target_log_path) def start(self, context): self.target.kick_off(self.command, as_root=self.as_root) def stop(self, context): self.target.killall('poller', signal='TERM', as_root=self.as_root) def update_output(self, context): host_output_file = os.path.join(context.output_directory, 'poller.csv') self.target.pull(self.target_output_path, host_output_file) context.add_artifact('poller-output', host_output_file, kind='data') host_log_file = os.path.join(context.output_directory, 'poller.log') self.target.pull(self.target_log_path, host_log_file) context.add_artifact('poller-log', host_log_file, kind='log') with open(host_log_file) as fh: for line in fh: if 'ERROR' in line: raise InstrumentError(line.strip()) if 'WARNING' in line: self.logger.warning(line.strip()) def teardown(self, context): self.target.remove(self.target_output_path) self.target.remove(self.target_log_path) def _generate_labels(self): # Split paths into their parts path_parts = [f.split(self.target.path.sep) for f in self.files] # Identify which parts differ between at least two of the paths differ_map = [len(set(x)) > 1 for x in zip(*path_parts)] # compose labels from path parts that differ labels = [] for pp in path_parts: label_parts = [p for i, p in enumerate(pp[:-1]) if i >= len(differ_map) or differ_map[i]] label_parts.append(pp[-1]) # always use file name even if same for all labels.append('-'.join(label_parts)) return labels def _adjust_timestamps(self, context): output_file = context.get_artifact_path('poller-output') message = 'Adjusting timestamps inside "{}" to align with ftrace' self.logger.debug(message.format(output_file)) trace_txt = context.get_artifact_path('trace-cmd-txt') trace_parser = TraceCmdParser(filter_markers=False) marker_timestamp = None for event in trace_parser.parse(trace_txt): if event.name == 'print' and 'POLLER_START' in event.text: marker_timestamp = event.timestamp break if marker_timestamp is None: raise InstrumentError('Did not see poller marker in ftrace') df = pd.read_csv(output_file) df.time -= df.time[0] df.time += marker_timestamp df.to_csv(output_file, index=False) ================================================ FILE: wa/instruments/poller/poller.c 
================================================
FILE: wa/instruments/poller/poller.c
================================================
/*    Copyright 2018 ARM Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

volatile sig_atomic_t done = 0;

void term(int signum)
{
    done = 1;
}

void strip(char *s)
{
    char *stripped_s = s;
    while (*s != '\0') {
        if (*s != ',' && *s != '\n') {
            *stripped_s++ = *s++;
        } else {
            ++s;
        }
    }
    *stripped_s = '\0';
}

typedef struct {
    int fd;
    char *path;
} poll_source_t;

int write_trace_marker(char *marker, int size)
{
    int ret;
    FILE *file;

    file = fopen("/sys/kernel/debug/tracing/trace_marker", "w");
    if (file == NULL) {
        return -errno;
    }
    ret = fwrite(marker, sizeof(char), size, file);
    fclose(file);
    return ret;
}

int main(int argc, char ** argv)
{
    extern char *optarg;
    extern int optind;
    int c = 0;
    int show_help = 0;
    useconds_t interval = 1000000;
    char buf[1024];
    memset(buf, 0, sizeof(buf));
    struct timespec current_time;
    double time_float;
    char *labels;
    int labelCount = 0;
    int should_write_marker = 0;
    int reopen_files = 0;
    int ret;

    static char usage[] = "usage: %s [-h] [-m] [-r] [-t INTERVAL] FILE [FILE ...]\n"
                          "polls FILE(s) every INTERVAL microseconds and outputs\n"
                          "the results in CSV format including a timestamp to STDOUT\n"
                          "\n"
                          "    -h     Display this message\n"
                          "    -m     Insert a marker into ftrace at the time of the first\n"
                          "           sample. This marker may be used to align the timestamps\n"
                          "           produced by the poller with those of ftrace events.\n"
                          "    -r     Reopen files on each read (needed for some sysfs/debugfs files)\n"
                          "    -t     The polling sample interval in microseconds\n"
                          "           Defaults to 1000000 (1 second)\n"
                          "    -l     Comma separated list of labels to use in the CSV\n"
                          "           output. This should match the number of files\n";
    //Handling command line arguments
    while ((c = getopt(argc, argv, "hmrt:l:")) != -1)
    {
        switch(c) {
            case 'h':
            case '?':
            default:
                show_help = 1;
                break;
            case 'm':
                should_write_marker = 1;
                break;
            case 'r':
                reopen_files = 1;
                break;
            case 't':
                interval = (useconds_t)atoi(optarg);
                break;
            case 'l':
                labels = optarg;
                labelCount = 1;
                int i;
                for (i = 0; labels[i]; i++)
                    labelCount += (labels[i] == ',');
        }
    }

    if (show_help) {
        fprintf(stderr, usage, argv[0]);
        exit(1);
    }

    if (optind >= argc) {
        fprintf(stderr, "ERROR: %s: missing file path(s)\n", argv[0]);
        fprintf(stderr, usage, argv[0]);
        exit(1);
    }

    int num_files = argc - optind;
    poll_source_t files_to_poll[num_files];

    if (labelCount && labelCount != num_files) {
        fprintf(stderr, "ERROR: %s: %d labels specified but %d files specified\n",
                argv[0], labelCount, num_files);
        fprintf(stderr, usage, argv[0]);
        exit(1);
    }

    //Print headers and open files to poll
    printf("time");
    if (labelCount) {
        printf(",%s", labels);
    }
    int i;
    for (i = 0; i < num_files; i++) {
        files_to_poll[i].path = argv[optind + i];
        files_to_poll[i].fd = open(files_to_poll[i].path, O_RDONLY);
        if (files_to_poll[i].fd == -1) {
            fprintf(stderr, "ERROR: Could not open \"%s\", got: %s\n",
                    files_to_poll[i].path, strerror(errno));
            exit(2);
        }
        if (!labelCount) {
            printf(",%s", argv[optind + i]);
        }
    }
    printf("\n");

    //Setup SIGTERM handler
    struct sigaction action;
    memset(&action, 0, sizeof(struct sigaction));
    action.sa_handler = term;
    sigaction(SIGTERM, &action, NULL);

    //Poll files
    int bytes_read = 0;
    while (!done) {
        clock_gettime(CLOCK_BOOTTIME, &current_time);
        if (should_write_marker) {
            ret = write_trace_marker("POLLER_START", 12);
            if (ret < 0) {
                fprintf(stderr, "ERROR writing trace marker: %s\n", strerror(-ret));
                exit(ret);
            }
            should_write_marker = 0; /* only mark the first sample */
        }
        time_float = (double)current_time.tv_sec;
        time_float += ((double)current_time.tv_nsec) / 1000 / 1000 / 1000;
        printf("%f", time_float);
        for (i = 0; i < num_files; i++) {
            if (reopen_files) {
                // Close and reopen the file to get fresh data
                close(files_to_poll[i].fd);
                files_to_poll[i].fd = open(files_to_poll[i].path, O_RDONLY);
                if (files_to_poll[i].fd == -1) {
                    fprintf(stderr, "WARNING: Could not reopen \"%s\", got: %s\n",
                            files_to_poll[i].path, strerror(errno));
                    printf(",");
                    continue;
                }
            } else {
                lseek(files_to_poll[i].fd, 0, SEEK_SET);
            }
            bytes_read = read(files_to_poll[i].fd, buf, 1024);
            if (bytes_read < 0) {
                fprintf(stderr, "WARNING: Read nothing from \"%s\"\n",
                        files_to_poll[i].path);
                printf(",");
                continue;
            }
            strip(buf);
            printf(",%s", buf);
            memset(buf, 0, sizeof(buf)); // "Empty" buffer
        }
        printf("\n");
        usleep(interval);
    }

    //Close files
    for (i = 0; i < num_files; i++) {
        close(files_to_poll[i].fd);
    }

    exit(0);
}
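For readers who want to prototype the sampling logic on the host before cross-compiling, here is a rough Python equivalent of the poller's main loop. This is a sketch only; the real tool is the C binary above, and the ftrace marker, label, and SIGTERM handling are omitted:

# Rough host-side Python equivalent of the poller loop (sketch only).
import sys
import time

def poll(paths, interval_us=1000000, samples=5):
    print('time,' + ','.join(paths))
    for _ in range(samples):
        values = []
        for path in paths:
            with open(path) as fh:  # reopen each time, like the -r option
                # strip commas/newlines, as the C strip() does
                values.append(fh.read().replace(',', '').replace('\n', ''))
        print('{:f},{}'.format(time.monotonic(), ','.join(values)))
        time.sleep(interval_us / 1e6)

if __name__ == '__main__':
    poll(sys.argv[1:])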
#

import os
import time
from datetime import datetime, timedelta

import pandas as pd

from wa import Instrument, Parameter, File, InstrumentError


class ProcStatCollector(Instrument):

    name = 'proc_stat'
    description = '''
    Collect CPU load information from /proc/stat.
    '''

    parameters = [
        Parameter('period', int, default=5,
                  constraint=lambda x: x > 0,
                  description='''
                  Time (in seconds) between collections.
                  '''),
    ]

    def initialize(self, context):  # pylint: disable=unused-argument
        self.host_script = context.get_resource(File(self, 'gather-load.sh'))
        self.target_script = self.target.install(self.host_script)
        self.target_output = self.target.get_workpath('proc-stat-raw.csv')
        self.stop_file = self.target.get_workpath('proc-stat-stop.signal')

    def setup(self, context):  # pylint: disable=unused-argument
        self.command = '{} sh {} {} {} {} {}'.format(
            self.target.busybox,
            self.target_script,
            self.target.busybox,
            self.target_output,
            self.period,
            self.stop_file,
        )
        self.target.remove(self.target_output)
        self.target.remove(self.stop_file)

    def start(self, context):  # pylint: disable=unused-argument
        self.target.kick_off(self.command)

    def stop(self, context):  # pylint: disable=unused-argument
        self.target.execute('{} touch {}'.format(self.target.busybox, self.stop_file))

    def update_output(self, context):
        self.logger.debug('Waiting for collector script to terminate...')
        self._wait_for_script()
        host_output = os.path.join(context.output_directory, 'proc-stat-raw.csv')
        self.target.pull(self.target_output, host_output)
        context.add_artifact('proc-stat-raw', host_output, kind='raw')

        df = pd.read_csv(host_output)
        no_ts = df[df.columns[1:]]
        deltas = (no_ts - no_ts.shift())
        total = deltas.sum(axis=1)
        util = (total - deltas.idle) / total * 100
        out_df = pd.concat([df.timestamp, util], axis=1).dropna()
        out_df.columns = ['timestamp', 'cpu_util']

        util_file = os.path.join(context.output_directory, 'proc-stat.csv')
        out_df.to_csv(util_file, index=False)
        context.add_artifact('proc-stat', util_file, kind='data')

    def finalize(self, context):  # pylint: disable=unused-argument
        if self.cleanup_assets and getattr(self, 'target_output', None):
            self.target.remove(self.target_output)
            self.target.remove(self.target_script)

    def _wait_for_script(self):
        start_time = datetime.utcnow()
        timeout = timedelta(seconds=300)
        while self.target.file_exists(self.stop_file):
            delta = datetime.utcnow() - start_time
            if delta > timeout:
                raise InstrumentError('Timed out waiting for /proc/stat collector to terminate.')
            time.sleep(1)  # avoid hammering the target while waiting


================================================
FILE: wa/instruments/proc_stat/gather-load.sh
================================================
#!/bin/sh

BUSYBOX=$1
OUTFILE=$2
PERIOD=$3
STOP_SIGNAL_FILE=$4

if [ "$#" != "4" ]; then
    echo "USAGE: gather-load.sh BUSYBOX OUTFILE PERIOD STOP_SIGNAL_FILE"
    exit 1
fi

echo "timestamp,user,nice,system,idle,iowait,irq,softirq,steal,guest,guest_nice" > $OUTFILE
while true; do
    echo -n $(${BUSYBOX} date -Iseconds) >> $OUTFILE
    ${BUSYBOX} cat /proc/stat | ${BUSYBOX} head -n 1 | \
        ${BUSYBOX} cut -d ' ' -f 2- | ${BUSYBOX} sed 's/ /,/g' >> $OUTFILE
    if [ -f $STOP_SIGNAL_FILE ]; then
        rm $STOP_SIGNAL_FILE
        break
    else
        sleep $PERIOD
    fi
done
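The utilisation calculation in ProcStatCollector.update_output is easiest to see with concrete numbers. A minimal sketch with made-up /proc/stat jiffy counts (all values illustrative only):

# Minimal sketch of the utilisation maths in update_output.
import pandas as pd

df = pd.DataFrame({
    'timestamp': ['t0', 't1', 't2'],
    'user':   [100, 150, 210],
    'system': [50, 70, 95],
    'idle':   [800, 900, 980],
})

no_ts = df[df.columns[1:]]
deltas = no_ts - no_ts.shift()              # per-column jiffy deltas between samples
total = deltas.sum(axis=1)                  # total jiffies elapsed in each period
util = (total - deltas.idle) / total * 100  # % of jiffies not spent idle

print(pd.concat([df.timestamp, util], axis=1).dropna())
# period t0->t1: (170 - 100) / 170 * 100 ~= 41.2% busy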
================================================
FILE: wa/instruments/screencap.py
================================================
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

from devlib.collector.screencapture import ScreenCaptureCollector

from wa import Instrument, Parameter


class ScreenCaptureInstrument(Instrument):

    name = 'screen_capture'
    description = """
    A simple instrument which captures the screen on the target device with a
    user-specified period.

    Please note that if too short a period is specified, then this instrument
    will capture the screen as fast as possible, rather than at the specified
    periodicity.
    """

    parameters = [
        Parameter('period', kind=int, default=10,
                  description="""
                  Period (in seconds) at which to capture the screen on the
                  target.
                  """),
    ]

    def __init__(self, target, **kwargs):
        super(ScreenCaptureInstrument, self).__init__(target, **kwargs)
        self.collector = None

    def setup(self, context):
        # We need to create a directory for the captured screenshots
        output_path = os.path.join(context.output_directory, "screen-capture")
        os.mkdir(output_path)
        self.collector = ScreenCaptureCollector(self.target,
                                                self.period)
        self.collector.set_output(output_path)
        self.collector.reset()

    def start(self, context):  # pylint: disable=unused-argument
        self.collector.start()

    def stop(self, context):  # pylint: disable=unused-argument
        self.collector.stop()


================================================
FILE: wa/instruments/serialmon.py
================================================
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

from devlib import SerialTraceCollector

from wa import Instrument, Parameter, hostside


class SerialMon(Instrument):

    name = 'serialmon'
    description = """
    Records the traffic on a serial connection.

    The traffic on a serial connection is monitored and logged to a file. In
    the event that the device is reset, the instrument will stop monitoring
    during the reset, and will reconnect once the reset has completed. This is
    to account for devices (e.g., the Juno) which utilise the serial
    connection to reset the board.
    """

    parameters = [
        Parameter('serial_port', kind=str, default="/dev/ttyS0",
                  description="""
                  The serial device to monitor.
                  """),
        Parameter('baudrate', kind=int, default=115200,
                  description="""
                  The baud-rate to use when connecting to the serial
                  connection.
"""), ] def __init__(self, target, **kwargs): super(SerialMon, self).__init__(target, **kwargs) self._collector = SerialTraceCollector(target, self.serial_port, self.baudrate) def start_logging(self, context, filename="serial.log"): outpath = os.path.join(context.output_directory, filename) self._collector.set_output(outpath) self._collector.reset() self.logger.debug("Acquiring serial port ({})".format(self.serial_port)) if self._collector.collecting: self.stop_logging(context) self._collector.start() def stop_logging(self, context, identifier="job"): self.logger.debug("Releasing serial port ({})".format(self.serial_port)) if self._collector.collecting: self._collector.stop() data = self._collector.get_data() for l in data: # noqa: E741 context.add_artifact("{}_serial_log".format(identifier), l.path, kind="log") def on_run_start(self, context): self.start_logging(context, "preamble_serial.log") def before_job_queue_execution(self, context): self.stop_logging(context, "preamble") def after_job_queue_execution(self, context): self.start_logging(context, "postamble_serial.log") def on_run_end(self, context): self.stop_logging(context, "postamble") def on_job_start(self, context): self.start_logging(context) def on_job_end(self, context): self.stop_logging(context) @hostside def before_reboot(self, context): self.stop_logging(context) ================================================ FILE: wa/instruments/trace_cmd.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=W0613,E1101 import os from devlib import FtraceCollector from wa import Instrument, Parameter from wa.framework import signal from wa.framework.instrument import very_slow, is_installed from wa.framework.exception import InstrumentError from wa.utils.types import list_of_strings from wa.utils.misc import which OUTPUT_TRACE_FILE = 'trace.dat' OUTPUT_TEXT_FILE = '{}.txt'.format(os.path.splitext(OUTPUT_TRACE_FILE)[0]) TIMEOUT = 180 class TraceCmdInstrument(Instrument): name = 'trace-cmd' description = """ trace-cmd is an instrument which interacts with ftrace Linux kernel internal tracer From trace-cmd man page: trace-cmd command interacts with the ftrace tracer that is built inside the Linux kernel. It interfaces with the ftrace specific files found in the debugfs file system under the tracing directory. trace-cmd reads a list of events it will trace, which can be specified in the config file as follows :: trace_events = ['irq*', 'power*'] If no event is specified, a default set of events that are generally considered useful for debugging/profiling purposes will be enabled. The list of available events can be obtained by rooting and running the following command line on the device :: trace-cmd list You may also specify ``trace_buffer_size`` setting which must be an integer that will be used to set the ftrace buffer size. 
    It will be interpreted as KB ::

        trace_cmd_buffer_size = 8000

    The maximum buffer size varies from device to device, but there is a
    maximum and trying to set buffer size beyond that will fail. If you plan
    on collecting a lot of trace over long periods of time, the buffer size
    will not be enough and you will only get trace for the last portion of
    your run. To deal with this you can set the ``trace_mode`` setting to
    ``'record'`` (the default is ``'start'``) ::

        trace_cmd_mode = 'record'

    This will cause trace-cmd to trace into file(s) on disk, rather than the
    buffer, and so the limit for the max size of the trace is set by the
    storage available on device. Bear in mind that ``'record'`` mode *is* more
    intrusive than the default, so if you do not plan on generating a lot of
    trace, it is best to use the default ``'start'`` mode.

    .. note:: Mode names correspond to the underlying trace-cmd executable's
              command used to implement them. You can find out more about what
              is happening in each case from trace-cmd documentation:
              https://lwn.net/Articles/341902/.

    This instrument comes with a trace-cmd binary that will be copied and used
    on the device, however post-processing will be, by default, done on-host
    and you must have trace-cmd installed and in your path. On Ubuntu systems,
    this may be done with ::

        sudo apt-get install trace-cmd

    Alternatively, you may set the ``report_on_target`` parameter to ``True``
    to enable on-target processing (this is useful when running on non-Linux
    hosts, but is likely to take longer and may fail on particularly
    resource-constrained targets).
    """

    parameters = [
        Parameter('events', kind=list_of_strings,
                  default=['sched*', 'irq*', 'power*', 'thermal*'],
                  global_alias='trace_events',
                  description="""
                  Specifies the list of events to be traced. Each event in the
                  list will be passed to trace-cmd with -e parameter and must
                  be in the format accepted by trace-cmd.
                  """),
        Parameter('functions', kind=list_of_strings,
                  global_alias='trace_functions',
                  description="""
                  Specifies the list of functions to be traced.
                  """),
        Parameter('buffer_size', kind=int, default=None,
                  global_alias='trace_buffer_size',
                  description="""
                  Attempt to set ftrace buffer size to the specified value (in
                  KB). Default buffer size may need to be increased for
                  long-running workloads, or if a large number of events have
                  been enabled. Note: there is a maximum size that the buffer
                  can be set, and that varies from device to device. Attempting
                  to set buffer size higher than this will fail. In that case,
                  this instrument will set the size to the highest possible
                  value by going down from the specified size in
                  ``buffer_size_step`` intervals.
                  """),
        Parameter('top_buffer_size', kind=int, default=None,
                  global_alias='trace_top_buffer_size',
                  description="""
                  The same as buffer_size except it sets the size of the
                  top-level buffer instead of the devlib one. If left unset, it
                  will default to the same as the devlib buffer size.
                  """),
        Parameter('buffer_size_step', kind=int, default=1000,
                  global_alias='trace_buffer_size_step',
                  description="""
                  Defines the decremental step used if the specified
                  ``buffer_size`` could not be set. This will be subtracted
                  from the buffer size until set succeeds or size is reduced to
                  1MB.
                  """),
        Parameter('report', kind=bool, default=True,
                  description="""
                  Specifies whether reporting should be performed once the
                  binary trace has been generated.
                  """),
        Parameter('no_install', kind=bool, default=False,
                  description="""
                  Do not install the bundled trace-cmd and use the one on the
                  device instead.
                  If there is not already a trace-cmd on the device, an error
                  is raised.
                  """),
        Parameter('report_on_target', kind=bool, default=False,
                  description="""
                  When enabled, report generation will be done on the target
                  device rather than on the host. Enable this if trace-cmd is
                  not available on the host, but note that on-target reporting
                  is likely to be slower and may fail on resource-constrained
                  targets.

                  .. note:: Host-side reporting (the default) requires the
                            latest version of trace-cmd to be installed on the
                            host (the one in your distribution's repos may be
                            too old).
                  """),
        Parameter('mode', kind=str, default='write-to-memory',
                  allowed_values=['write-to-disk', 'write-to-memory'],
                  description="""
                  Specifies whether collected traces should be saved in memory
                  or on disk. Extensive workloads may hit an out-of-memory
                  issue; the write-to-disk mode can help in such cases.
                  """),
    ]

    def __init__(self, target, **kwargs):
        super(TraceCmdInstrument, self).__init__(target, **kwargs)
        self.collector = None

    def initialize(self, context):
        if not self.target.is_rooted:
            raise InstrumentError('trace-cmd instrument cannot be used on an unrooted device.')
        collector_params = dict(
            events=self.events,
            functions=self.functions,
            buffer_size=self.buffer_size,
            top_buffer_size=self.top_buffer_size,
            buffer_size_step=self.buffer_size_step,
            automark=False,
            autoview=False,
            no_install=self.no_install,
            strict=False,
            mode=self.mode,
        )
        if self.report and self.report_on_target:
            collector_params['autoreport'] = True
            collector_params['report_on_target'] = True
        else:
            collector_params['autoreport'] = False
            collector_params['report_on_target'] = False
        self.collector = FtraceCollector(self.target, **collector_params)

        # Register ourselves as absolute last event before and
        # first after so we can mark the trace at the right time
        signal.connect(self.mark_start, signal.BEFORE_WORKLOAD_EXECUTION, priority=11)
        signal.connect(self.mark_stop, signal.AFTER_WORKLOAD_EXECUTION, priority=11)

    def setup(self, context):
        if self.collector:
            self.collector.reset()

    @very_slow
    def start(self, context):
        if self.collector:
            self.collector.start()

    @very_slow
    def stop(self, context):
        if self.collector:
            self.collector.stop()

    def update_output(self, context):  # NOQA pylint: disable=R0912
        if not self.collector:
            return
        self.logger.info('Extracting trace from target...')
        outfile = os.path.join(context.output_directory, OUTPUT_TRACE_FILE)
        self.collector.set_output(outfile)
        self.collector.get_data()
        context.add_artifact('trace-cmd-bin', outfile, 'data')
        if self.report:
            textfile = os.path.join(context.output_directory, OUTPUT_TEXT_FILE)
            if not self.report_on_target:
                self.collector.report(outfile, textfile)
            context.add_artifact('trace-cmd-txt', textfile, 'export')

    def teardown(self, context):
        path = self.target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
        self.target.remove(path)
        if self.report_on_target:
            path = self.target.path.join(self.target.working_directory, OUTPUT_TEXT_FILE)
            self.target.remove(path)

    def validate(self):
        if self.report and not self.report_on_target and not which('trace-cmd'):
            raise InstrumentError('trace-cmd is not in PATH; is it installed?')
        if is_installed('perfetto'):
            raise InstrumentError('trace-cmd cannot be used at the same time as perfetto')

    def mark_start(self, context):
        if self.is_enabled:
            self.collector.mark_start()

    def mark_stop(self, context):
        if self.is_enabled:
            self.collector.mark_stop()


================================================
FILE: wa/output_processors/__init__.py
================================================
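Pulling the trace-cmd docstring examples together, a typical config fragment enabling the instrument might look like the following sketch, using the global aliases documented in the parameter descriptions above (all values are examples only):

# Illustrative config fragment for the trace-cmd instrument.
trace_events = ['sched*', 'irq*', 'power*']
trace_buffer_size = 8000      # in KB; stepped down if the device cannot honour it
trace_cmd_mode = 'record'     # trace to files on disk instead of the ring buffer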
================================================
FILE: wa/output_processors/cpustates.py
================================================
# Copyright 2015-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from collections import OrderedDict

from devlib.utils.csvutil import csvwriter

from wa import OutputProcessor, Parameter
from wa.utils.cpustates import report_power_stats


def _get_cpustates_description():
    """
    Reuse the description for report_power_stats() but strip away its
    parameter docs, as they are not relevant to the OutputProcessor.
    """
    output_lines = []
    lines = iter(report_power_stats.__doc__.split('\n'))
    line = next(lines)
    while True:
        try:
            if line.strip().startswith(':param'):
                while line.strip():
                    line = next(lines)
            output_lines.append(line)
            line = next(lines)
        except StopIteration:
            break
    return '\n'.join(output_lines)


class CpuStatesProcessor(OutputProcessor):

    name = 'cpustates'
    description = _get_cpustates_description()

    parameters = [
        Parameter('use_ratios', kind=bool, default=False,
                  description="""
                  By default proportional values will be reported as
                  percentages; if this flag is enabled, they will be reported
                  as ratios instead.
                  """),
        Parameter('no_idle', kind=bool, default=False,
                  description="""
                  Indicate that there will be no idle transitions in the
                  trace. By default, a core will be reported as being in an
                  "unknown" state until the first idle transition for that
                  core. Normally, this is not an issue, as cores are "nudged"
                  as part of the setup to ensure that there is an idle
                  transition before the measured region. However, if all idle
                  states for the core have been disabled, or if the kernel
                  does not have cpuidle, the nudge will not result in an idle
                  transition, which would cause the cores to be reported to be
                  in "unknown" state for the entire execution.

                  If this parameter is set to ``True``, the processor will
                  assume that cores are running prior to the beginning of the
                  run, and they will leave unknown state on the first
                  frequency transition.
                  """),
        Parameter('split_wfi_states', kind=bool, default=False,
                  description="""
                  WFI is a very shallow idle state. The core remains powered on
                  when in this state, which means the power usage while in this
                  state will depend on the current voltage, and therefore
                  current frequency.

                  Setting this to ``True`` will track time spent in WFI at
                  each frequency separately, allowing you to gain the most
                  accurate picture of energy usage.
                  """),
    ]

    def __init__(self, *args, **kwargs):
        super(CpuStatesProcessor, self).__init__(*args, **kwargs)
        self.iteration_reports = OrderedDict()

    def process_job_output(self, output, target_info, run_output):  # pylint: disable=unused-argument
        trace_file = output.get_artifact_path('trace-cmd-txt')
        if not trace_file:
            self.logger.warning('Text trace does not appear to have been generated; skipping this iteration.')
            return
        if 'cpufreq' not in target_info.modules:
            msg = '"cpufreq" module not detected on target, cpu frequency information may be missing.'
            self.logger.warning(msg)
        if 'cpuidle' not in target_info.modules:
            msg = '"cpuidle" module not detected on target, cpu idle information may be missing.'
            self.logger.debug(msg)

        self.logger.info('Generating power state reports from trace...')
        reports = report_power_stats(  # pylint: disable=unbalanced-tuple-unpacking
            trace_file=trace_file,
            output_basedir=output.basepath,
            cpus=target_info.cpus,
            use_ratios=self.use_ratios,
            no_idle=self.no_idle,
            split_wfi_states=self.split_wfi_states,
        )
        for report in reports.values():
            output.add_artifact(report.name, report.filepath, kind='data')

        iteration_id = (output.id, output.label, output.iteration)
        self.iteration_reports[iteration_id] = reports

    # pylint: disable=too-many-locals,unused-argument
    def process_run_output(self, output, target_info):
        if not self.iteration_reports:
            self.logger.warning('No power state reports generated.')
            return

        parallel_rows = []
        powerstate_rows = []
        for iteration_id, reports in self.iteration_reports.items():
            job_id, workload, iteration = iteration_id
            parallel_report = reports['parallel-stats']
            powerstate_report = reports['power-state-stats']

            for record in parallel_report.values:
                parallel_rows.append([job_id, workload, iteration] + record)
            for state in sorted(powerstate_report.state_stats):
                stats = powerstate_report.state_stats[state]
                powerstate_rows.append([job_id, workload, iteration, state]
                                       + ['{:.3f}'.format(s if s is not None else 0)
                                          for s in stats])

        outpath = output.get_path('parallel-stats.csv')
        with csvwriter(outpath) as writer:
            writer.writerow(['id', 'workload', 'iteration', 'cluster',
                             'number_of_cores', 'total_time',
                             '%time', '%running_time'])
            writer.writerows(parallel_rows)
        output.add_artifact('run-parallel-stats', outpath, kind='export')

        outpath = output.get_path('power-state-stats.csv')
        with csvwriter(outpath) as writer:
            headers = ['id', 'workload', 'iteration', 'state']
            headers += ['{} CPU{}'.format(c, i)
                        for i, c in enumerate(powerstate_report.core_names)]
            writer.writerow(headers)
            writer.writerows(powerstate_rows)
        output.add_artifact('run-power-state-stats', outpath, kind='export')
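A minimal sketch of consuming the run-level CSV this processor emits (the file and column names are as written above; the path and pandas usage are illustrative):

# Illustrative: aggregate time per power state across iterations from the
# run-level summary written by process_run_output. 'wa_output' is WA's
# default output directory name.
import pandas as pd

stats = pd.read_csv('wa_output/power-state-stats.csv')
per_state = stats.groupby('state').sum(numeric_only=True)
print(per_state)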
"""), Parameter('extra_columns', kind=list_of_strings, description=""" List of classifiers to use as columns. .. note:: This cannot be set if ``use_all_classifiers`` is ``True``. """), ] def __init__(self, *args, **kwargs): super(CsvReportProcessor, self).__init__(*args, **kwargs) self.outputs_so_far = [] self.artifact_added = False def validate(self): super(CsvReportProcessor, self).validate() if self.use_all_classifiers and self.extra_columns: msg = 'extra_columns cannot be specified when '\ 'use_all_classifiers is True' raise ConfigError(msg) # pylint: disable=unused-argument def process_job_output(self, output, target_info, run_output): self.outputs_so_far.append(output) self._write_outputs(self.outputs_so_far, run_output) if not self.artifact_added: run_output.add_artifact('run_result_csv', 'results.csv', 'export') self.artifact_added = True # pylint: disable=attribute-defined-outside-init def process_run_output(self, output, target_info): # pylint: disable=unused-argument self.outputs_so_far.append(output) self._write_outputs(self.outputs_so_far, output) if not self.artifact_added: output.add_artifact('run_result_csv', 'results.csv', 'export') self.artifact_added = True # pylint: disable=attribute-defined-outside-init def _write_outputs(self, outputs, output): if self.use_all_classifiers: classifiers = set([]) for out in outputs: for metric in out.metrics: classifiers.update(list(metric.classifiers.keys())) extra_columns = list(classifiers) elif self.extra_columns: extra_columns = self.extra_columns else: extra_columns = [] outfile = output.get_path('results.csv') with csvwriter(outfile) as writer: writer.writerow(['id', 'workload', 'iteration', 'metric', ] + extra_columns + ['value', 'units']) for o in outputs: if o.kind == 'job': header = [o.id, o.label, o.iteration] elif o.kind == 'run': # Should be a RunOutput. Run-level metrics aren't attached # to any job so we leave 'id' and 'iteration' blank, and use # the run name for the 'label' field. header = [None, o.info.run_name, None] else: raise RuntimeError( 'Output of kind "{}" unrecognised by csvproc'.format(o.kind)) for metric in o.result.metrics: row = (header + [metric.name] + [str(metric.classifiers.get(c, '')) for c in extra_columns] + [str(metric.value), metric.units or '']) writer.writerow(row) ================================================ FILE: wa/output_processors/postgresql.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
================================================
FILE: wa/output_processors/postgresql.py
================================================
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import uuid
import collections
import tarfile

try:
    import psycopg2
    from psycopg2 import (connect, extras)
    from psycopg2 import Error as Psycopg2Error
except ImportError as e:
    psycopg2 = None
    import_error_msg = e.args[0] if e.args else str(e)

from devlib.target import KernelVersion, KernelConfig

from wa import OutputProcessor, Parameter, OutputProcessorError
from wa.framework.target.info import CpuInfo
from wa.utils.postgres import (POSTGRES_SCHEMA_DIR, cast_level, cast_vanilla,
                               adapt_vanilla, return_as_is, adapt_level,
                               ListOfLevel, adapt_ListOfX, create_iterable_adapter,
                               get_schema_versions)
from wa.utils.serializer import json
from wa.utils.types import level


class PostgresqlResultProcessor(OutputProcessor):

    name = 'postgres'
    description = """
    Stores results in a Postgresql database.

    The structure of this database can easily be understood by examining
    the postgres_schema.sql file (the schema used to generate it):
    {}
    """.format(os.path.join(POSTGRES_SCHEMA_DIR, 'postgres_schema.sql'))

    parameters = [
        Parameter('username', default='postgres',
                  description="""
                  This is the username that will be used to connect to the
                  Postgresql database. Note that depending on whether the user
                  has privileges to modify the database (normally only possible
                  on localhost), the user may only be able to append entries.
                  """),
        Parameter('password', default=None,
                  description="""
                  The password to be used to connect to the specified database
                  with the specified username.
                  """),
        Parameter('dbname', default='wa',
                  description="""
                  Name of the database that will be created or added to. Note,
                  to override this, you can specify a value in your user wa
                  configuration file.
                  """),
        Parameter('host', kind=str, default='localhost',
                  description="""
                  The host where the Postgresql server is running. The default
                  is localhost (i.e. the machine that wa is running on). This
                  is useful for complex systems where multiple machines may be
                  executing workloads and uploading their results to a remote,
                  centralised database.
                  """),
        Parameter('port', kind=str, default='5432',
                  description="""
                  The port the Postgresql server is running on, on the host.
                  The default is Postgresql's default, so do not change this
                  unless you have modified the default port for Postgresql.
"""), ] # Commands sql_command = { "create_run": "INSERT INTO Runs (oid, event_summary, basepath, status, timestamp, run_name, project, project_stage, retry_on_status, max_retries, bail_on_init_failure, allow_phone_home, run_uuid, start_time, metadata, state, _pod_version, _pod_serialization_version) " "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", "update_run": "UPDATE Runs SET event_summary=%s, status=%s, timestamp=%s, end_time=%s, duration=%s, state=%s WHERE oid=%s;", "create_job": "INSERT INTO Jobs (oid, run_oid, status, retry, label, job_id, iterations, workload_name, metadata, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);", "create_target": "INSERT INTO Targets (oid, run_oid, target, modules, cpus, os, os_version, hostid, hostname, abi, is_rooted, kernel_version, kernel_release, kernel_sha1, kernel_config, sched_features, page_size_kb, system_id, screen_resolution, prop, android_id, _pod_version, _pod_serialization_version) " "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", "create_event": "INSERT INTO Events (oid, run_oid, job_oid, timestamp, message, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s)", "create_artifact": "INSERT INTO Artifacts (oid, run_oid, job_oid, name, large_object_uuid, description, kind, is_dir, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", "create_metric": "INSERT INTO Metrics (oid, run_oid, job_oid, name, value, units, lower_is_better, _pod_version, _pod_serialization_version) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", "create_augmentation": "INSERT INTO Augmentations (oid, run_oid, name) VALUES (%s, %s, %s)", "create_classifier": "INSERT INTO Classifiers (oid, artifact_oid, metric_oid, job_oid, run_oid, key, value) VALUES (%s, %s, %s, %s, %s, %s, %s)", "create_parameter": "INSERT INTO Parameters (oid, run_oid, job_oid, augmentation_oid, resource_getter_oid, name, value, value_type, type) " "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", "create_resource_getter": "INSERT INTO Resource_Getters (oid, run_oid, name) VALUES (%s, %s, %s)", "create_job_aug": "INSERT INTO Jobs_Augs (oid, job_oid, augmentation_oid) VALUES (%s, %s, %s)", "create_large_object": "INSERT INTO LargeObjects (oid, lo_oid) VALUES (%s, %s)" } # Lists to track which run-related items have already been added metrics_already_added = [] # Dicts needed so that jobs can look up ids artifacts_already_added = {} augmentations_already_added = {} # Status bits (flags) first_job_run = True def __init__(self, *args, **kwargs): super(PostgresqlResultProcessor, self).__init__(*args, **kwargs) self.conn = None self.cursor = None self.run_uuid = None self.target_uuid = None def initialize(self, context): if not psycopg2: raise ImportError( 'The psycopg2 module is required for the ' + 'Postgresql Output Processor: {}'.format(import_error_msg)) # N.B. 
        # Typecasters are for postgres->python and adapters the opposite
        self.connect_to_database()

        # Register the adapters and typecasters for enum types
        self.cursor.execute("SELECT NULL::status_enum")
        status_oid = self.cursor.description[0][1]
        self.cursor.execute("SELECT NULL::param_enum")
        param_oid = self.cursor.description[0][1]
        LEVEL = psycopg2.extensions.new_type(
            (status_oid,), "LEVEL", cast_level)
        psycopg2.extensions.register_type(LEVEL)
        PARAM = psycopg2.extensions.new_type(
            (param_oid,), "PARAM", cast_vanilla)
        psycopg2.extensions.register_type(PARAM)
        psycopg2.extensions.register_adapter(level, return_as_is(adapt_level))
        psycopg2.extensions.register_adapter(
            ListOfLevel, adapt_ListOfX(adapt_level))
        psycopg2.extensions.register_adapter(KernelVersion, adapt_vanilla)
        psycopg2.extensions.register_adapter(
            CpuInfo, adapt_vanilla)
        psycopg2.extensions.register_adapter(
            collections.OrderedDict, extras.Json)
        psycopg2.extensions.register_adapter(dict, extras.Json)
        psycopg2.extensions.register_adapter(
            KernelConfig, create_iterable_adapter(2, explicit_iterate=True))
        # Register ready-made UUID type adapter
        extras.register_uuid()

        # Insert a run_uuid which will be globally accessible during the run
        self.run_uuid = uuid.UUID(str(uuid.uuid4()))
        run_output = context.run_output
        retry_on_status = ListOfLevel(run_output.run_config.retry_on_status)
        self.cursor.execute(
            self.sql_command['create_run'],
            (
                self.run_uuid,
                run_output.event_summary,
                run_output.basepath,
                run_output.status,
                run_output.state.timestamp,
                run_output.info.run_name,
                run_output.info.project,
                run_output.info.project_stage,
                retry_on_status,
                run_output.run_config.max_retries,
                run_output.run_config.bail_on_init_failure,
                run_output.run_config.allow_phone_home,
                run_output.info.uuid,
                run_output.info.start_time,
                run_output.metadata,
                json.dumps(run_output.state.to_pod()),
                run_output.result._pod_version,  # pylint: disable=protected-access
                run_output.result._pod_serialization_version,  # pylint: disable=protected-access
            )
        )
        self.target_uuid = uuid.uuid4()
        target_info = context.target_info
        target_pod = target_info.to_pod()
        self.cursor.execute(
            self.sql_command['create_target'],
            (
                self.target_uuid,
                self.run_uuid,
                target_pod['target'],
                target_pod['modules'],
                target_pod['cpus'],
                target_pod['os'],
                target_pod['os_version'],
                target_pod['hostid'],
                target_pod['hostname'],
                target_pod['abi'],
                target_pod['is_rooted'],
                # Important caveat: kernel_version is the name of the column in
                # the Targets table. However, this refers to
                # kernel_version.version, not to kernel_version as a whole.
                target_pod['kernel_version'],
                target_pod['kernel_release'],
                target_info.kernel_version.sha1,
                target_info.kernel_config,
                target_pod['sched_features'],
                target_pod['page_size_kb'],
                target_pod['system_id'],
                # Android specific
                list(target_pod.get('screen_resolution', [])),
                target_pod.get('prop'),
                target_pod.get('android_id'),
                target_pod.get('_pod_version'),
                target_pod.get('_pod_serialization_version'),
            )
        )
        # Commit cursor commands
        self.conn.commit()

    def export_job_output(self, job_output, target_info, run_output):  # pylint: disable=too-many-branches, too-many-statements, too-many-locals, unused-argument
        '''
        Run once for each job to upload information that is updated on a
        job-by-job basis.
        '''
        # Ensure we're still connected to the database.
        self.connect_to_database()

        job_uuid = uuid.uuid4()
        # Create a new job
        self.cursor.execute(
            self.sql_command['create_job'],
            (
                job_uuid,
                self.run_uuid,
                job_output.status,
                job_output.retry,
                job_output.label,
                job_output.id,
                job_output.iteration,
                job_output.spec.workload_name,
                job_output.metadata,
                job_output.spec._pod_version,  # pylint: disable=protected-access
                job_output.spec._pod_serialization_version,  # pylint: disable=protected-access
            )
        )

        for classifier in job_output.classifiers:
            classifier_uuid = uuid.uuid4()
            self.cursor.execute(
                self.sql_command['create_classifier'],
                (
                    classifier_uuid,
                    None,
                    None,
                    job_uuid,
                    None,
                    classifier,
                    job_output.classifiers[classifier]
                )
            )

        # Update the run table and run-level parameters
        self.cursor.execute(
            self.sql_command['update_run'],
            (
                run_output.event_summary,
                run_output.status,
                run_output.state.timestamp,
                run_output.info.end_time,
                None,
                json.dumps(run_output.state.to_pod()),
                self.run_uuid))
        for classifier in run_output.classifiers:
            classifier_uuid = uuid.uuid4()
            self.cursor.execute(
                self.sql_command['create_classifier'],
                (
                    classifier_uuid,
                    None,
                    None,
                    None,
                    self.run_uuid,  # run-level classifier: run_oid is set, job_oid is not
                    classifier,
                    run_output.classifiers[classifier]
                )
            )
        self.sql_upload_artifacts(run_output, record_in_added=True)
        self.sql_upload_metrics(run_output, record_in_added=True)
        self.sql_upload_augmentations(run_output)
        self.sql_upload_resource_getters(run_output)
        self.sql_upload_events(job_output, job_uuid=job_uuid)
        self.sql_upload_artifacts(job_output, job_uuid=job_uuid)
        self.sql_upload_metrics(job_output, job_uuid=job_uuid)
        self.sql_upload_job_augmentations(job_output, job_uuid=job_uuid)
        self.sql_upload_parameters("workload",
                                   job_output.spec.workload_parameters,
                                   job_uuid=job_uuid)
        self.sql_upload_parameters("runtime",
                                   job_output.spec.runtime_parameters,
                                   job_uuid=job_uuid)
        self.conn.commit()
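    # NOTE: the create_classifier statement has seven placeholders
    # (oid, artifact_oid, metric_oid, job_oid, run_oid, key, value), so each
    # execute() above must supply exactly seven values, with exactly one of
    # the owner columns non-NULL. Illustrative tuples (names hypothetical):
    #   job-level:  (uuid4(), None, None, job_uuid, None, 'tag', 'value')
    #   run-level:  (uuid4(), None, None, None, run_uuid, 'tag', 'value')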
        self.connect_to_database()

        # Update the job statuses following completion of the run
        for job in run_output.jobs:
            job_id = job.id
            job_status = job.status
            self.cursor.execute(
                "UPDATE Jobs SET status=%s WHERE job_id=%s and run_oid=%s",
                (
                    job_status,
                    job_id,
                    self.run_uuid
                )
            )

        run_uuid = self.run_uuid
        # Update the run entry after jobs have completed
        run_info_pod = run_output.info.to_pod()
        run_state_pod = run_output.state.to_pod()
        sql_command_update_run = self.sql_command['update_run']
        self.cursor.execute(
            sql_command_update_run,
            (
                run_output.event_summary,
                run_output.status,
                run_info_pod['start_time'],
                run_info_pod['end_time'],
                run_info_pod['duration'],
                json.dumps(run_state_pod),
                run_uuid,
            )
        )
        self.sql_upload_events(run_output)
        self.sql_upload_artifacts(run_output, check_uniqueness=True)
        self.sql_upload_metrics(run_output, check_uniqueness=True)
        self.sql_upload_augmentations(run_output)
        self.conn.commit()

    # Upload functions for use with both jobs and runs

    def sql_upload_resource_getters(self, output_object):
        for resource_getter in output_object.run_config.resource_getters:
            resource_getter_uuid = uuid.uuid4()
            self.cursor.execute(
                self.sql_command['create_resource_getter'],
                (
                    resource_getter_uuid,
                    self.run_uuid,
                    resource_getter,
                )
            )
            self.sql_upload_parameters(
                'resource_getter',
                output_object.run_config.resource_getters[resource_getter],
                owner_id=resource_getter_uuid,
            )

    def sql_upload_events(self, output_object, job_uuid=None):
        for event in output_object.events:
            event_uuid = uuid.uuid4()
            self.cursor.execute(
                self.sql_command['create_event'],
                (
                    event_uuid,
                    self.run_uuid,
                    job_uuid,
                    event.timestamp,
                    event.message,
                    event._pod_version,  # pylint: disable=protected-access
                    event._pod_serialization_version,  # pylint: disable=protected-access
                )
            )

    def sql_upload_job_augmentations(self, output_object, job_uuid=None):
        '''
        This is a table which links the uuids of augmentations to jobs.
        Note that the augmentations table is prepopulated, leading to the
        necessity of an augmentations_already_added dictionary, which gives
        us the corresponding uuids.
        Augmentations which are prefixed by ~ are toggled off and not part
        of the job, therefore not added.
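
        For example, if a job's spec lists ``augmentations: [trace-cmd, ~csv]``
        (names illustrative), only ``trace-cmd`` is linked to the job here;
        the ``~csv`` entry was toggled off and is skipped.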
''' for augmentation in output_object.spec.augmentations: if augmentation.startswith('~'): continue augmentation_uuid = self.augmentations_already_added[augmentation] job_aug_uuid = uuid.uuid4() self.cursor.execute( self.sql_command['create_job_aug'], ( job_aug_uuid, job_uuid, augmentation_uuid, ) ) def sql_upload_augmentations(self, output_object): for augmentation in output_object.augmentations: if augmentation.startswith('~') or augmentation in self.augmentations_already_added: continue augmentation_uuid = uuid.uuid4() self.cursor.execute( self.sql_command['create_augmentation'], ( augmentation_uuid, self.run_uuid, augmentation, ) ) self.sql_upload_parameters( 'augmentation', output_object.run_config.augmentations[augmentation], owner_id=augmentation_uuid, ) self.augmentations_already_added[augmentation] = augmentation_uuid def sql_upload_metrics(self, output_object, record_in_added=False, check_uniqueness=False, job_uuid=None): for metric in output_object.metrics: if metric in self.metrics_already_added and check_uniqueness: continue metric_uuid = uuid.uuid4() self.cursor.execute( self.sql_command['create_metric'], ( metric_uuid, self.run_uuid, job_uuid, metric.name, metric.value, metric.units, metric.lower_is_better, metric._pod_version, # pylint: disable=protected-access metric._pod_serialization_version, # pylint: disable=protected-access ) ) for classifier in metric.classifiers: classifier_uuid = uuid.uuid4() self.cursor.execute( self.sql_command['create_classifier'], ( classifier_uuid, None, metric_uuid, None, None, classifier, metric.classifiers[classifier], ) ) if record_in_added: self.metrics_already_added.append(metric) def sql_upload_artifacts(self, output_object, record_in_added=False, check_uniqueness=False, job_uuid=None): ''' Uploads artifacts to the database. 
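        Artifacts are stored as PostgreSQL large objects: plain files are
        written into the large object directly, while directory artifacts
        are written as gzip-compressed tarballs (see _sql_write_dir_lobject
        below).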
        record_in_added will record the artifacts added in
        artifacts_already_added.
        check_uniqueness will ensure artifacts in artifacts_already_added
        do not get added again.
        '''
        for artifact in output_object.artifacts:
            if artifact in self.artifacts_already_added and check_uniqueness:
                self.logger.debug('Skipping uploading {} as already added'.format(artifact))
                continue

            if artifact in self.artifacts_already_added:
                self._sql_update_artifact(artifact, output_object)
            else:
                self._sql_create_artifact(artifact, output_object,
                                          record_in_added, job_uuid)

    def sql_upload_parameters(self, parameter_type, parameter_dict, owner_id=None, job_uuid=None):
        # Note, currently no augmentation parameters are workload specific,
        # but in the future this may change
        augmentation_id = None
        resource_getter_id = None

        if parameter_type not in ['workload', 'resource_getter', 'augmentation', 'runtime']:
            # boot parameters are not yet implemented
            # device parameters are redundant due to the targets table
            raise NotImplementedError("{} is not a valid parameter type.".format(parameter_type))

        if parameter_type == "resource_getter":
            resource_getter_id = owner_id
        elif parameter_type == "augmentation":
            augmentation_id = owner_id

        for parameter in parameter_dict:
            parameter_uuid = uuid.uuid4()
            self.cursor.execute(
                self.sql_command['create_parameter'],
                (
                    parameter_uuid,
                    self.run_uuid,
                    job_uuid,
                    augmentation_id,
                    resource_getter_id,
                    parameter,
                    json.dumps(parameter_dict[parameter]),
                    str(type(parameter_dict[parameter])),
                    parameter_type,
                )
            )

    def connect_to_database(self):
        dsn = "dbname={} user={} password={} host={} port={}".format(
            self.dbname, self.username, self.password, self.host, self.port)
        try:
            self.conn = connect(dsn=dsn)
        except Psycopg2Error as e:
            raise OutputProcessorError(
                "Database error; if the database doesn't exist, "
                + "please use 'wa create database' to create it: {}".format(e))
        self.cursor = self.conn.cursor()
        self.verify_schema_versions()

    def execute_sql_line_by_line(self, sql):
        cursor = self.conn.cursor()
        for line in sql.replace('\n', "").replace(";", ";\n").split("\n"):
            if line and not line.startswith('--'):
                cursor.execute(line)
        cursor.close()
        self.conn.commit()
        self.conn.reset()

    def verify_schema_versions(self):
        local_schema_version, db_schema_version = get_schema_versions(self.conn)
        if local_schema_version != db_schema_version:
            self.cursor.close()
            self.cursor = None
            self.conn.commit()
            self.conn.reset()
            msg = 'The current database schema is v{}, however the local ' \
                  'schema version is v{}.
Please update your database ' \ 'with the create command' raise OutputProcessorError(msg.format(db_schema_version, local_schema_version)) def _sql_write_file_lobject(self, source, lobject): with open(source) as lobj_file: lobj_data = lobj_file.read() if len(lobj_data) > 50000000: # Notify if LO inserts larger than 50MB self.logger.debug("Inserting large object of size {}".format(len(lobj_data))) lobject.write(lobj_data) self.conn.commit() def _sql_write_dir_lobject(self, source, lobject): with tarfile.open(fileobj=lobject, mode='w|gz') as lobj_dir: lobj_dir.add(source, arcname='.') self.conn.commit() def _sql_update_artifact(self, artifact, output_object): self.logger.debug('Updating artifact: {}'.format(artifact)) lobj = self.conn.lobject(oid=self.artifacts_already_added[artifact], mode='w') if artifact.is_dir: self._sql_write_dir_lobject(os.path.join(output_object.basepath, artifact.path), lobj) else: self._sql_write_file_lobject(os.path.join(output_object.basepath, artifact.path), lobj) def _sql_create_artifact(self, artifact, output_object, record_in_added=False, job_uuid=None): self.logger.debug('Uploading artifact: {}'.format(artifact)) artifact_uuid = uuid.uuid4() lobj = self.conn.lobject() loid = lobj.oid large_object_uuid = uuid.uuid4() if artifact.is_dir: self._sql_write_dir_lobject(os.path.join(output_object.basepath, artifact.path), lobj) else: self._sql_write_file_lobject(os.path.join(output_object.basepath, artifact.path), lobj) self.cursor.execute( self.sql_command['create_large_object'], ( large_object_uuid, loid, ) ) self.cursor.execute( self.sql_command['create_artifact'], ( artifact_uuid, self.run_uuid, job_uuid, artifact.name, large_object_uuid, artifact.description, str(artifact.kind), artifact.is_dir, artifact._pod_version, # pylint: disable=protected-access artifact._pod_serialization_version, # pylint: disable=protected-access ) ) for classifier in artifact.classifiers: classifier_uuid = uuid.uuid4() self.cursor.execute( self.sql_command['create_classifier'], ( classifier_uuid, artifact_uuid, None, None, None, classifier, artifact.classifiers[classifier], ) ) if record_in_added: self.artifacts_already_added[artifact] = loid ================================================ FILE: wa/output_processors/sqlite.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=attribute-defined-outside-init import os import sqlite3 import uuid from datetime import datetime, timedelta from contextlib import contextmanager from wa import OutputProcessor, Parameter, OutputProcessorError from wa.utils.serializer import json from wa.utils.types import boolean # IMPORTANT: when updating this schema, make sure to bump the version! 
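# The ``results`` view created by the SCHEMA below joins runs, workload
# specs and metrics, so accumulated results can be queried in one go; a
# minimal sketch (the UUID value is illustrative):
#
#   SELECT workload, iteration, metric, value, units
#   FROM results
#   WHERE run_uuid = '72f43de2-...';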
SCHEMA_VERSION = '0.0.2' SCHEMA = [ '''CREATE TABLE runs ( uuid text, start_time datetime, end_time datetime, duration integer )''', '''CREATE TABLE workload_specs ( id text, run_oid text, number_of_iterations integer, label text, workload_name text, boot_parameters text, runtime_parameters text, workload_parameters text )''', '''CREATE TABLE metrics ( spec_oid int, iteration integer, metric text, value text, units text, lower_is_better integer )''', '''CREATE VIEW results AS SELECT uuid as run_uuid, spec_id, label as workload, iteration, metric, value, units, lower_is_better FROM metrics AS m INNER JOIN ( SELECT ws.OID as spec_oid, ws.id as spec_id, uuid, label FROM workload_specs AS ws INNER JOIN runs AS r ON ws.run_oid = r.OID ) AS wsr ON wsr.spec_oid = m.spec_oid ''', '''CREATE TABLE __meta ( schema_version text )''', '''INSERT INTO __meta VALUES ("{}")'''.format(SCHEMA_VERSION), ] sqlite3.register_adapter(datetime, lambda x: x.isoformat()) sqlite3.register_adapter(timedelta, lambda x: x.total_seconds()) sqlite3.register_adapter(uuid.UUID, str) class SqliteResultProcessor(OutputProcessor): name = 'sqlite' description = """ Stores results in an sqlite database. This may be used to accumulate results of multiple runs in a single file. """ parameters = [ Parameter('database', default=None, global_alias='sqlite_database', description=""" Full path to the sqlite database to be used. If this is not specified then a new database file will be created in the output directory. This setting can be used to accumulate results from multiple runs in a single database. If the specified file does not exist, it will be created, however the directory of the file must exist. .. note:: The value must resolve to an absolute path, relative paths are not allowed; however the value may contain environment variables and/or the home reference "~". """), Parameter('overwrite', kind=boolean, default=False, global_alias='sqlite_overwrite', description=""" If ``True``, this will overwrite the database file if it already exists. If ``False`` (the default) data will be added to the existing file (provided schema versions match -- otherwise an error will be raised). """), ] def __init__(self, *args, **kwargs): super(SqliteResultProcessor, self).__init__(*args, **kwargs) self._last_spec = None self._run_oid = None self._spec_oid = None self._run_initialized = False def export_job_output(self, job_output, target_info, run_output): # pylint: disable=unused-argument if not self._run_initialized: self._init_run(run_output) if self._last_spec != job_output.spec: self._update_spec(job_output.spec) metrics = [(self._spec_oid, job_output.iteration, m.name, str(m.value), m.units, int(m.lower_is_better)) for m in job_output.metrics] if metrics: with self._open_connection() as conn: conn.executemany('INSERT INTO metrics VALUES (?,?,?,?,?,?)', metrics) def export_run_output(self, run_output, target_info): # pylint: disable=unused-argument if not self._run_initialized: self._init_run(run_output) metrics = [(self._spec_oid, run_output.iteration, m.name, str(m.value), m.units, int(m.lower_is_better)) for m in run_output.metrics] if metrics: with self._open_connection() as conn: conn.executemany('INSERT INTO metrics VALUES (?,?,?,?,?,?)', metrics) info = run_output.info with self._open_connection() as conn: conn.execute('''UPDATE runs SET start_time=?, end_time=?, duration=? 
                            WHERE OID=?''',
                         (info.start_time, info.end_time, info.duration,
                          self._run_oid))

    def _init_run(self, run_output):
        if not self.database:  # pylint: disable=access-member-before-definition
            self.database = os.path.join(run_output.basepath, 'results.sqlite')
        self.database = os.path.expandvars(os.path.expanduser(self.database))

        if not os.path.exists(self.database):
            self._init_db()
        elif self.overwrite:  # pylint: disable=no-member
            os.remove(self.database)
            self._init_db()
        else:
            self._validate_schema_version()
        self._update_run(run_output.info.uuid)

        # if the database file happens to be in the output directory, add it as an
        # artifact; if it isn't, then RunOutput doesn't need to keep track of it.
        if not os.path.relpath(self.database, run_output.basepath).startswith('..'):
            run_output.add_artifact('sqlitedb', self.database, kind='export')

        self._run_initialized = True

    def _init_db(self):
        with self._open_connection() as conn:
            for command in SCHEMA:
                conn.execute(command)

    def _validate_schema_version(self):
        with self._open_connection() as conn:
            try:
                c = conn.execute('SELECT schema_version FROM __meta')
                found_version = c.fetchone()[0]
            except sqlite3.OperationalError:
                message = '{} does not appear to be a valid WA results database.'.format(self.database)
                raise OutputProcessorError(message)
            if found_version != SCHEMA_VERSION:
                message = 'Schema version in {} ({}) does not match current version ({}).'
                raise OutputProcessorError(message.format(self.database, found_version, SCHEMA_VERSION))

    def _update_run(self, run_uuid):
        with self._open_connection() as conn:
            conn.execute('INSERT INTO runs (uuid) VALUES (?)', (run_uuid,))
            conn.commit()
            c = conn.execute('SELECT OID FROM runs WHERE uuid=?', (run_uuid,))
            self._run_oid = c.fetchone()[0]

    def _update_spec(self, spec):
        self._last_spec = spec
        spec_tuple = (spec.id, self._run_oid, spec.iterations, spec.label,
                      spec.workload_name,
                      json.dumps(spec.boot_parameters.to_pod()),
                      json.dumps(spec.runtime_parameters.to_pod()),
                      json.dumps(spec.workload_parameters.to_pod()))
        with self._open_connection() as conn:
            conn.execute('INSERT INTO workload_specs VALUES (?,?,?,?,?,?,?,?)', spec_tuple)
            conn.commit()
            c = conn.execute('SELECT OID FROM workload_specs WHERE run_oid=? AND id=?',
                             (self._run_oid, spec.id))
            self._spec_oid = c.fetchone()[0]

    @contextmanager
    def _open_connection(self):
        conn = sqlite3.connect(self.database)
        try:
            yield conn
        finally:
            conn.commit()


================================================
FILE: wa/output_processors/status.py
================================================
# Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
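#
# The reporter below produces a ``status.txt`` along these lines (all
# values illustrative):
#
#   Run name: example_run
#   Run status: OK
#   Date: Mon Jan  1 12:00:00 2024
#   9/10 iterations completed without error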
#
# pylint: disable=R0201
import time
from collections import Counter

from wa.framework.output import Status
from wa.framework.output_processor import OutputProcessor
from wa.utils.misc import write_table


class StatusTxtReporter(OutputProcessor):
    name = 'status'
    description = """
    Outputs a txt file containing general status information about which
    runs failed and which were successful.

    """

    def process_run_output(self, output, target_info):  # pylint: disable=unused-argument
        counter = Counter()
        for jo in output.jobs:
            counter[jo.status] += 1

        outfile = output.get_path('status.txt')
        self.logger.info('Status available in {}'.format(outfile))
        with open(outfile, 'w') as wfh:
            wfh.write('Run name: {}\n'.format(output.info.run_name))
            wfh.write('Run status: {}\n'.format(output.status))
            wfh.write('Date: {}\n'.format(time.strftime("%c")))
            if output.events:
                wfh.write('Events:\n')
                for event in output.events:
                    wfh.write('\t{}\n'.format(event.summary))

            txt = '{}/{} iterations completed without error\n'
            wfh.write(txt.format(counter[Status.OK], len(output.jobs)))
            wfh.write('\n')
            status_lines = [list(map(str, [o.id, o.label, o.iteration, o.status,
                                           o.event_summary]))
                            for o in output.jobs]
            write_table(status_lines, wfh, align='<<>><')
        output.add_artifact('run_status_summary', 'status.txt', 'export')


================================================
FILE: wa/output_processors/targz.py
================================================
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import shutil
import tarfile

from wa import OutputProcessor, Parameter
from wa.framework import signal


class TargzProcessor(OutputProcessor):

    name = 'targz'

    description = '''
    Create a tarball of the output directory.

    This will create a gzip-compressed tarball of the output directory. By
    default, it will be created at the same level and will have the same name
    as the output directory, but with a .tar.gz extension.
    '''

    parameters = [
        Parameter('outfile',
                  description='''
                  The name of the output file to be used. If this is not an
                  absolute path, the file will be created relative to the
                  directory in which WA was invoked. If this contains
                  subdirectories, they must already exist.

                  The name may contain named format specifiers. Any of the
                  ``RunInfo`` fields can be named, resulting in the value of
                  that field (e.g. ``'start_time'``) being formatted into the
                  tarball name.

                  By default, the output file will be created at the same
                  level and share the name of the WA output directory (but
                  with a .tar.gz extension).
                  '''),
        Parameter('delete-output', kind=bool, default=False,
                  description='''
                  If set to ``True``, the WA output directory will be deleted
                  after the tarball is created.
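
                  As a minimal sketch (the ``outfile`` value is illustrative;
                  ``run_name`` is a ``RunInfo`` field), the following config
                  enables the processor, names the tarball after the run, and
                  removes the original output directory afterwards::

                      config:
                          augmentations:
                              - targz
                          targz:
                              outfile: '{run_name}.tar.gz'
                              delete-output: true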
                  '''),
    ]

    def initialize(self, context):
        if self.delete_output:
            self.logger.debug('Registering RUN_FINALIZED handler.')
            signal.connect(self.delete_output_directory, signal.RUN_FINALIZED,
                           priority=-100)

    def export_run_output(self, run_output, target_info):  # pylint: disable=unused-argument
        if self.outfile:
            outfile_path = self.outfile.format(**run_output.info.to_pod())
        else:
            outfile_path = run_output.basepath.rstrip('/') + '.tar.gz'

        self.logger.debug('Creating {}'.format(outfile_path))
        with tarfile.open(outfile_path, 'w:gz') as tar:
            tar.add(run_output.basepath)

    def delete_output_directory(self, context):
        self.logger.debug('Deleting output directory')
        shutil.rmtree(context.run_output.basepath)


================================================
FILE: wa/output_processors/uxperf.py
================================================
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from wa import OutputProcessor
from wa.utils.android import LogcatParser


class UxperfProcessor(OutputProcessor):

    name = 'uxperf'

    description = '''
    Parse logcat for UX_PERF markers to produce performance metrics for
    workload actions using specified instrumentation.

    An action represents a series of UI interactions to capture.

    NOTE: The UX_PERF markers are turned off by default and must be enabled
    in an agenda file by setting ``markers_enabled`` for the workload to
    ``True``.
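
    As a minimal sketch (using the ``adobereader`` workload as an example),
    an agenda along these lines enables the markers and this processor::

        config:
            augmentations:
                - uxperf
        workloads:
            - name: adobereader
              workload_parameters:
                  markers_enabled: true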
    '''

    # pylint: disable=too-many-locals,unused-argument
    def process_job_output(self, output, target_info, job_output):
        logcat = output.get_artifact('logcat')
        if not logcat:
            return

        parser = LogcatParser()
        start_times = {}

        filepath = output.get_path(logcat.path)
        for entry in parser.parse(filepath):
            if entry.tag != 'UX_PERF':
                continue

            parts = entry.message.split()
            if len(parts) != 3:
                message = 'Unexpected UX_PERF message @ {}: {}'
                self.logger.warning(message.format(entry.timestamp, entry.message))
                continue

            action, state, when = parts
            when = int(when)

            if state == 'start':
                if action in start_times:
                    self.logger.warning('start before end @ {}'.format(entry.timestamp))
                start_times[action] = when
            elif state == 'end':
                start_time = start_times.pop(action, None)
                if start_time is None:
                    self.logger.warning('end without start @ {}'.format(entry.timestamp))
                    continue
                duration = (when - start_time) / 1000
                metric_name = '{}_duration'.format(action)
                output.add_metric(metric_name, duration, 'microseconds',
                                  lower_is_better=True)
            else:
                self.logger.warning('Unexpected state "{}" @ {}'.format(state, entry.timestamp))


================================================
FILE: wa/tools/revent/Makefile
================================================
# CROSS_COMPILE=aarch64-linux-gnu- make
#
CC=gcc

ifdef DEBUG
CFLAGS=-static -lc -g
else
CFLAGS=-static -lc -O2
endif

revent: revent.c
	$(CROSS_COMPILE)$(CC) $(CFLAGS) revent.c -o revent

clean:
	rm -rf revent

.PHONY: clean


================================================
FILE: wa/tools/revent/revent.c
================================================
/*    Copyright 2012-2017 ARM Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/select.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <linux/input.h>
#include <linux/uinput.h>

#define die(args...) do { \
    fprintf(stderr, "ERROR: "); \
    fprintf(stderr, args); \
    fprintf(stderr, "\n"); \
    exit(EXIT_FAILURE); \
} while(0)

#define dprintf(args...) if (verbose) printf(args)

#define INPDEV_MAX_DEVICES 16
#define INPDEV_MAX_PATH 30
#define MAX_NAME_LEN 255

#define EV_BITS_SIZE (EV_MAX / 8 + 1)
#define KEY_BITS_SIZE (KEY_MAX / 8 + 1)

#define HEADER_PADDING_SIZE 6
#define EVENT_PADDING_SIZE 4

const char MAGIC[] = "REVENT";

// NOTE: This should be incremented if any changes are made to the file format.
// Should that be the case, also make sure to update the format description
// in doc/source/revent.rst and the Python parser in wa/utils/revent.py.
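//
// For reference, a version-3 recording as produced by this tool is laid
// out as follows (reconstructed from write_record_header() and record()
// below):
//
//   char     magic[6];      // "REVENT"
//   uint16_t version;       // FORMAT_VERSION
//   uint16_t mode;          // recording_mode_t
//   char     padding[6];
//   ...device description (format depends on mode)...
//   uint64_t num_events;
//   uint64_t start_sec, start_usec, end_sec, end_usec;
//   ...events...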
uint16_t FORMAT_VERSION = 3; typedef enum { FALSE=0, TRUE } bool_t; typedef enum { GENERAL_MODE=0, GAMEPAD_MODE, INVALID_MODE // should be last } recording_mode_t; typedef enum { RECORD_COMMAND=0, REPLAY_COMMAND, DUMP_COMMAND, INFO_COMMAND, INVALID_COMMAND } revent_command_t; typedef struct { struct input_absinfo absinfo; int ev_code; } absinfo_t; typedef struct { struct input_id id; char name[MAX_NAME_LEN]; char ev_bits[EV_BITS_SIZE]; char abs_bits[KEY_BITS_SIZE]; char rel_bits[KEY_BITS_SIZE]; char key_bits[KEY_BITS_SIZE]; uint32_t num_absinfo; absinfo_t absinfo[ABS_CNT]; } device_info_t; typedef struct { revent_command_t command; recording_mode_t mode; int32_t record_time; int32_t device_number; char *file; } revent_args_t; typedef struct { int32_t num; char **paths; int *fds; int max_fd; } input_devices_t; typedef struct { int16_t dev_idx; struct input_event event; } replay_event_t; typedef struct { uint16_t version; recording_mode_t mode; } revent_record_desc_t; typedef struct { revent_record_desc_t desc; input_devices_t devices; device_info_t *gamepad_info; uint64_t num_events; struct timeval start_time; struct timeval end_time; replay_event_t *events; } revent_recording_t; bool_t verbose = FALSE; bool_t wait_for_stdin = TRUE; bool_t is_numeric(char *string) { int len = strlen(string); int i = 0; while(i < len) { if(!isdigit(string[i])) return FALSE; i++; } return TRUE; } int test_bit(const char *mask, int bit) { return mask[bit / 8] & (1 << (bit % 8)); } int count_bits(const char *mask) { int count = 0, i; static const uint8_t nybble_lookup[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 }; for (i = 0; i < KEY_MAX/8 + 1; i++) { char byte = mask[i]; count += nybble_lookup[byte & 0x0F] + nybble_lookup[byte >> 4]; } return count; } /* * An input device is considered to be a gamepad if it supports * ABS x and Y axes and the four gamepad buttons (variously known as * square/triangle/circle/X, A/B/X/Y, or north/south/east/west). 
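 *
 * For example, a typical game controller reports ABS_X/ABS_Y for its left
 * stick and sets BTN_GAMEPAD in its key bits, so it passes this check,
 * while a touchscreen reports ABS_X/ABS_Y but no gamepad buttons and is
 * rejected.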
*/ bool_t is_gamepad(device_info_t *dev) { if (!test_bit(dev->abs_bits, ABS_X)) return FALSE; if (!test_bit(dev->abs_bits, ABS_Y)) return FALSE; if (!test_bit(dev->key_bits, BTN_GAMEPAD)) return FALSE; return TRUE; } off_t get_file_size(const char *filename) { struct stat st; if (stat(filename, &st) == 0) return st.st_size; die("Cannot determine size of %s: %s", filename, strerror(errno)); } int get_device_info(int fd, device_info_t *info) { bzero(info, sizeof(device_info_t)); if (ioctl(fd, EVIOCGID, &info->id) < 0) return errno; if (ioctl(fd, EVIOCGNAME(MAX_NAME_LEN * sizeof(char)), &info->name) < 0) return errno; if (ioctl(fd, EVIOCGBIT(0, sizeof(info->ev_bits)), &info->ev_bits) < 0) return errno; int ev_type; for (ev_type = 0 ; ev_type < EV_MAX; ev_type++) { if (test_bit(info->ev_bits, ev_type)) { if (ev_type == EV_ABS) { ioctl(fd, EVIOCGBIT(ev_type, sizeof(info->abs_bits)), &info->abs_bits); int ev_code; for (ev_code = 0; ev_code < KEY_MAX; ev_code++) { if (test_bit(info->abs_bits, ev_code)) { absinfo_t *inf = &info->absinfo[info->num_absinfo++]; inf->ev_code = ev_code; ioctl(fd, EVIOCGABS(ev_code), &inf->absinfo); } } } else if (ev_type == EV_REL) { ioctl(fd, EVIOCGBIT(ev_type, sizeof(info->rel_bits)), &info->rel_bits); } else if (ev_type == EV_KEY) { ioctl(fd, EVIOCGBIT(ev_type, sizeof(info->key_bits)), &info->key_bits); } } } return 0; } void destroy_replay_device(int fd) { if(ioctl(fd, UI_DEV_DESTROY) < 0) die("Could not destroy replay device"); } inline void set_evbit(int fd, int bit) { if(ioctl(fd, UI_SET_EVBIT, bit) < 0) die("Could not set EVBIT %i", bit); } inline void set_keybit(int fd, int bit) { if(ioctl(fd, UI_SET_KEYBIT, bit) < 0) die("Could not set KEYBIT %i", bit); } inline void set_absbit(int fd, int bit) { if(ioctl(fd, UI_SET_ABSBIT, bit) < 0) die("Could not set ABSBIT %i", bit); } inline void set_relbit(int fd, int bit) { if(ioctl(fd, UI_SET_RELBIT, bit) < 0) die("Could not set RELBIT %i", bit); } inline void block_sigterm(sigset_t *oldset) { sigset_t sigset; sigemptyset(&sigset); sigaddset(&sigset, SIGTERM); sigprocmask(SIG_BLOCK, &sigset, oldset); } // Events are recorded with their original timestamps, but for playback, we // want to treat timestamps as deltas from event zero. 
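// For example, events captured at absolute times 100.25s and 100.75s in a
// recording whose start_time is 100.00s become deltas of 0.25s and 0.75s;
// replay() then adds those deltas to its own start time to schedule writes.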
void adjust_timestamps(revent_recording_t *recording) { uint64_t i; struct timeval time_zero, time_delta; time_zero.tv_sec = recording->start_time.tv_sec; time_zero.tv_usec = recording->start_time.tv_usec; for(i = 0; i < recording->num_events; i++) { timersub(&recording->events[i].event.time, &time_zero, &time_delta); recording->events[i].event.time.tv_sec = time_delta.tv_sec; recording->events[i].event.time.tv_usec = time_delta.tv_usec; } timersub(&recording->end_time, &time_zero, &time_delta); recording->end_time.tv_sec = time_delta.tv_sec; recording->end_time.tv_usec = time_delta.tv_usec; } int write_record_header(int fd, const revent_record_desc_t *desc) { ssize_t ret; char padding[HEADER_PADDING_SIZE]; ret = write(fd, MAGIC, 6); if (ret < 6) return errno; ret = write(fd, &desc->version, sizeof(desc->version)); if (ret < sizeof(desc->version)) return errno; ret = write(fd, (uint16_t *)&desc->mode, sizeof(uint16_t)); if (ret < sizeof(uint16_t)) return errno; bzero(padding, HEADER_PADDING_SIZE); ret = write(fd, padding, HEADER_PADDING_SIZE); if (ret < HEADER_PADDING_SIZE) return errno; return 0; } int read_record_header(int fd, revent_record_desc_t *desc) { char start[7], padding[HEADER_PADDING_SIZE]; ssize_t ret; ret = read(fd, start, 6); if (ret < 6) return errno; start[6] = '\0'; if (strcmp(start, MAGIC)) return EINVAL; ret = read(fd, &desc->version, sizeof(desc->version)); if (ret < sizeof(desc->version)) return errno; if (desc->version >= 2) { ret = read(fd, &desc->mode, sizeof(uint16_t)); if (ret < sizeof(uint16_t)) return errno; ret = read(fd, padding, HEADER_PADDING_SIZE); if (ret < HEADER_PADDING_SIZE) return errno; } else { /* Version 1 supports only general recordings (mode 0) and * does not have padding */ desc->mode = GENERAL_MODE; } return 0; } int write_general_input_devices(const input_devices_t *devices, FILE *fout) { size_t ret; uint32_t path_len; int i; ret = fwrite(&devices->num, sizeof(uint32_t), 1, fout); if (ret < 1) { return errno; } for (i = 0; i < devices->num; i++) { path_len = (uint32_t)strlen(devices->paths[i]); ret = fwrite(&path_len, sizeof(uint32_t), 1, fout); if (ret < 1) { return errno; } ret = fwrite(devices->paths[i], sizeof(char), path_len, fout); if (ret < path_len) { return errno; } } return 0; } int read_general_input_devices(input_devices_t *devices, FILE *fin) { size_t ret; uint32_t path_len; int i; ret = fread(&devices->num, sizeof(uint32_t), 1, fin); if (ret < 1) { return EIO; } devices->paths = malloc(sizeof(char *) * devices->num); if (devices->paths == NULL) { return ENOMEM; } for (i = 0; i < devices->num; i++) { ret = fread(&path_len, sizeof(uint32_t), 1, fin); if (ret < 1) { return EIO; } devices->paths[i] = malloc(sizeof(char) * path_len + 1); if (devices->paths[i] == NULL) { return ENOMEM; } ret = fread(devices->paths[i], sizeof(char), path_len, fin); if (ret < path_len) { return EIO; } devices->paths[i][path_len] = '\0'; } return 0; } int write_input_id(FILE *fout, const struct input_id *id) { int ret = 0; ret += fwrite(&id->bustype, sizeof(uint16_t), 1, fout); ret += fwrite(&id->vendor, sizeof(uint16_t), 1, fout); ret += fwrite(&id->product, sizeof(uint16_t), 1, fout); ret += fwrite(&id->version, sizeof(uint16_t), 1, fout); if (ret < 4) return errno; return 0; } int read_input_id(FILE *fin, struct input_id *id) { int ret = 0; ret += fread(&id->bustype, sizeof(uint16_t), 1, fin); ret += fread(&id->vendor, sizeof(uint16_t), 1, fin); ret += fread(&id->product, sizeof(uint16_t), 1, fin); ret += fread(&id->version, sizeof(uint16_t), 1, 
fin); if (ret < 4) return errno; return 0; } int write_absinfo(FILE *fout, const absinfo_t *info) { int ret = 0; ret += fwrite(&info->ev_code, sizeof(int32_t), 1, fout); ret += fwrite(&info->absinfo.value, sizeof(int32_t), 1, fout); ret += fwrite(&info->absinfo.minimum, sizeof(int32_t), 1, fout); ret += fwrite(&info->absinfo.maximum, sizeof(int32_t), 1, fout); ret += fwrite(&info->absinfo.fuzz, sizeof(int32_t), 1, fout); ret += fwrite(&info->absinfo.flat, sizeof(int32_t), 1, fout); ret += fwrite(&info->absinfo.resolution, sizeof(int32_t), 1, fout); if (ret < 7) return errno; return 0; } int read_absinfo(FILE *fin, absinfo_t *info) { int ret = 0; ret += fread(&info->ev_code, sizeof(int32_t), 1, fin); ret += fread(&info->absinfo.value, sizeof(int32_t), 1, fin); ret += fread(&info->absinfo.minimum, sizeof(int32_t), 1, fin); ret += fread(&info->absinfo.maximum, sizeof(int32_t), 1, fin); ret += fread(&info->absinfo.fuzz, sizeof(int32_t), 1, fin); ret += fread(&info->absinfo.flat, sizeof(int32_t), 1, fin); ret += fread(&info->absinfo.resolution, sizeof(int32_t), 1, fin); if (ret < 7) return errno; return 0; } int write_device_info(FILE *fout, const device_info_t *info) { int ret = write_input_id(fout, &info->id); if (ret) return ret; uint32_t name_len = (uint32_t)strlen(info->name); ret = fwrite(&name_len, sizeof(uint32_t), 1, fout); ret += fwrite(info->name, sizeof(char), name_len, fout); if (ret < (name_len + 1)) return EIO; ret = fwrite(info->ev_bits, sizeof(char), EV_BITS_SIZE, fout); ret += fwrite(info->abs_bits, sizeof(char), KEY_BITS_SIZE, fout); ret += fwrite(info->rel_bits, sizeof(char), KEY_BITS_SIZE, fout); ret += fwrite(info->key_bits, sizeof(char), KEY_BITS_SIZE, fout); if (ret < (EV_BITS_SIZE + KEY_BITS_SIZE * 3)) return EIO; printf("EV_BITS_SIZE: %d\n", EV_BITS_SIZE); printf("KEY_BITS_SIZE: %d\n", KEY_BITS_SIZE); ret = fwrite(&info->num_absinfo, sizeof(uint32_t), 1, fout); if (ret < 1) return errno; int i; for (i = 0; i < info->num_absinfo; i++) { ret = write_absinfo(fout, &info->absinfo[i]); if (ret) return ret; } return 0; } int read_device_info(FILE *fin, device_info_t *info) { int ret = read_input_id(fin, &info->id); if (ret) return ret; uint32_t name_len = 0; fread(&name_len, sizeof(uint32_t), 1, fin); if (!name_len) return EIO; ret += fread(info->name, sizeof(char), name_len, fin); if (ret < name_len) return EIO; info->name[name_len] = '\0'; ret = fread(info->ev_bits, sizeof(char), EV_BITS_SIZE, fin); ret += fread(info->abs_bits, sizeof(char), KEY_BITS_SIZE, fin); ret += fread(info->rel_bits, sizeof(char), KEY_BITS_SIZE, fin); ret += fread(info->key_bits, sizeof(char), KEY_BITS_SIZE, fin); if (ret < (EV_BITS_SIZE + KEY_BITS_SIZE * 3)) return EIO; ret = fread(&info->num_absinfo, sizeof(uint32_t), 1, fin); if (ret < 1) return errno; int i; for (i = 0; i < info->num_absinfo; i++) { ret = read_absinfo(fin, &info->absinfo[i]); if (ret) return ret; } return 0; } void print_device_info(device_info_t *info) { printf("device name: %s\n", info->name); printf("bustype: 0x%x vendor: 0x%x product: 0x%x version: 0x%x\n", info->id.bustype, info->id.vendor, info->id.product, info->id.version); printf("abs_bits: %d\n", count_bits(info->abs_bits)); printf("rel_bits: %d\n", count_bits(info->rel_bits)); printf("key_bits: %d\n", count_bits(info->key_bits)); printf("num_absinfo: %ld\n", info->num_absinfo); int i; printf("KEY: "); for (i = 0; i < KEY_MAX; i++) { if (test_bit(info->key_bits, i)) { printf("%04x ", i); } } printf("\n"); struct input_absinfo *inf; int ev_code; printf("ABS:\n"); for (i 
= 0; i < info->num_absinfo; i++) { ev_code = info->absinfo[i].ev_code; inf = &info->absinfo[i].absinfo; printf("%04x : min %i, max %i, fuzz %0i, flat %i, res %i\n", ev_code, inf->minimum, inf->maximum, inf->fuzz, inf->flat, inf->resolution); } } int read_record_timestamps(FILE *fin, revent_recording_t *recording) { int ret; ret = fread(&recording->start_time.tv_sec, sizeof(uint64_t), 1, fin); if (ret < 1) return errno; ret = fread(&recording->start_time.tv_usec, sizeof(uint64_t), 1, fin); if (ret < 1) return errno; ret = fread(&recording->end_time.tv_sec, sizeof(uint64_t), 1, fin); if (ret < 1) return errno; ret = fread(&recording->end_time.tv_usec, sizeof(uint64_t), 1, fin); if (ret < 1) return errno; return 0; } int write_replay_event(FILE *fout, const replay_event_t *ev) { size_t ret; uint64_t time; ret = fwrite(&ev->dev_idx, sizeof(uint16_t), 1, fout); if (ret < 1) return errno; time = (uint64_t)ev->event.time.tv_sec; ret = fwrite(&time, sizeof(uint64_t), 1, fout); if (ret < 1) return errno; time = (uint64_t)ev->event.time.tv_usec; ret = fwrite(&time, sizeof(uint64_t), 1, fout); if (ret < 1) return errno; ret = fwrite(&ev->event.type, sizeof(uint16_t), 1, fout); if (ret < 1) return errno; ret = fwrite(&ev->event.code, sizeof(uint16_t), 1, fout); if (ret < 1) return errno; ret = fwrite(&ev->event.value, sizeof(uint32_t), 1, fout); if (ret < 1) return errno; return 0; } int read_replay_event(FILE *fin, replay_event_t *ev) { size_t ret; ret = fread(&ev->dev_idx, sizeof(uint16_t), 1, fin); if (ret < 1) return errno; ret = fread(&ev->event.time.tv_sec, sizeof(uint64_t), 1, fin); if (ret < 1) return errno; ret = fread(&ev->event.time.tv_usec, sizeof(uint64_t), 1, fin); if (ret < 1) return errno; ret = fread(&ev->event.type, sizeof(uint16_t), 1, fin); if (ret < 1) return errno; ret = fread(&ev->event.code, sizeof(uint16_t), 1, fin); if (ret < 1) return errno; ret = fread(&ev->event.value, sizeof(uint32_t), 1, fin); if (ret < 1) return errno; return 0; } int read_legacy_replay_event(int fdin, replay_event_t* ev) { size_t rb; char padding[EVENT_PADDING_SIZE]; rb = read(fdin, &(ev->dev_idx), sizeof(int32_t)); if (rb < (int)sizeof(int32_t)){ //Allow for abrupt ending of legacy recordings. 
        if (!errno)
            return EOF;
        return errno;
    }
    rb = read(fdin, &padding, EVENT_PADDING_SIZE);
    if (rb < (int)sizeof(int32_t))
        return errno;

    struct timeval time;
    uint64_t temp_time;
    rb = read(fdin, &temp_time, sizeof(uint64_t));
    if (rb < (int)sizeof(uint64_t))
        return errno;
    time.tv_sec = (time_t)temp_time;
    rb = read(fdin, &temp_time, sizeof(uint64_t));
    if (rb < (int)sizeof(uint64_t))
        return errno;
    time.tv_usec = (suseconds_t)temp_time;
    ev->event.time = time;
    rb = read(fdin, &(ev->event.type), sizeof(uint16_t));
    if (rb < (int)sizeof(uint16_t))
        return errno;
    rb = read(fdin, &(ev->event.code), sizeof(uint16_t));
    if (rb < (int)sizeof(uint16_t))
        return errno;
    rb = read(fdin, &(ev->event.value), sizeof(int32_t));
    if (rb < (int)sizeof(int32_t))
        return errno;
    return 0;
}

int open_revent_recording(const char *filepath, revent_record_desc_t *desc, FILE **fin)
{
    *fin = fopen(filepath, "r");
    if (*fin == NULL)
        return errno;

    int ret = read_record_header(fileno(*fin), desc);
    if (ret)
        return ret;
    if (desc->version < 0 || desc->version > FORMAT_VERSION)
        return EPROTO;

    return 0;
}

FILE *init_recording(const char *pathname, recording_mode_t mode)
{
    revent_record_desc_t desc = {
        .mode = mode,
        .version = FORMAT_VERSION
    };
    FILE *fh = fopen(pathname, "w");
    if (fh == NULL)
        return fh;
    write_record_header(fileno(fh), &desc);
    return fh;
}

void init_input_devices(input_devices_t *devices)
{
    devices->num = 0;
    devices->max_fd = -1;
    devices->paths = NULL;
    devices->fds = NULL;
}

int init_general_input_devices(input_devices_t *devices)
{
    uint32_t num, i, path_len;
    char paths[INPDEV_MAX_DEVICES][INPDEV_MAX_PATH];
    int fds[INPDEV_MAX_DEVICES];
    int max_fd = 0;
    int ret;
    int clk_id = CLOCK_MONOTONIC;

    num = 0;
    for(i = 0; i < INPDEV_MAX_DEVICES; ++i) {
        sprintf(paths[num], "/dev/input/event%d", i);
        fds[num] = open(paths[num], O_RDONLY);
        if(fds[num] > 0) {
            if (fds[num] > max_fd)
                max_fd = fds[num];
            if (ret = ioctl(fds[num], EVIOCSCLOCKID, &clk_id)) {
                dprintf("Failed to set monotonic clock for %s.\n", paths[num]);
                return -ret;
            }
            dprintf("opened %s\n", paths[num]);
            num++;
        } else {
            dprintf("could not open %s\n", paths[num]);
        }
    }
    if (num == 0)
        return EACCES;

    devices->num = num;
    devices->max_fd = max_fd;

    devices->paths = malloc(sizeof(char *) * num);
    if (devices->paths == NULL) {
        return ENOMEM;
    }
    for (i = 0; i < num; i ++) {
        path_len = strlen(paths[i]);
        devices->paths[i] = malloc(sizeof(char) * (path_len + 1));
        if (devices->paths[i] == NULL)
            return ENOMEM;
        strncpy(devices->paths[i], paths[i], path_len + 1);
    }

    devices->fds = malloc(sizeof(int) * num);
    if (devices->fds == NULL) {
        return ENOMEM;
    }
    for (i = 0; i < num; i ++)
        devices->fds[i] = fds[i];

    return 0;
}

void fini_general_input_devices(input_devices_t *devices)
{
    int i;
    for (i = 0; i < devices->num; i++) {
        if (devices->fds != NULL)
            close(devices->fds[i]);
        if (devices->paths != NULL)
            free(devices->paths[i]);
    }
    free(devices->fds);
    devices->num = 0;
}

int init_gamepad_input_devices(input_devices_t *devices, device_info_t *gamepad_info)
{
    int i;
    char *gamepad_path = NULL;
    input_devices_t all_devices;
    device_info_t info;

    int ret = init_general_input_devices(&all_devices);
    if (ret) {
        return ret;
    }

    for (i = 0; i < all_devices.num; i++) {
        ret = get_device_info(all_devices.fds[i], &info);
        if (ret) {
            dprintf("Could not get info for %s: %s\n", all_devices.paths[i], strerror(errno));
            continue;
        }

        if (!is_gamepad(&info)) {
            dprintf("not a gamepad: %s\n", all_devices.paths[i]);
            continue;
        }

        if (gamepad_path != NULL) {
            die("More than one device identified as a gamepad (run \"revent info\" to see which)");
        }
gamepad_path = malloc(sizeof(char) * INPDEV_MAX_PATH); if (gamepad_path == NULL) die("Could not create replay device: %s", strerror(ENOMEM)); strncpy(gamepad_path, all_devices.paths[i], INPDEV_MAX_PATH); memcpy(gamepad_info, &info, sizeof(device_info_t)); } fini_general_input_devices(&all_devices); if (gamepad_path == NULL) { return ENOMEDIUM; } dprintf("Found gamepad: %s\n", gamepad_path); devices->num = 1; devices->paths = malloc(sizeof(char *)); devices->paths[0] = gamepad_path; devices->fds = malloc(sizeof(int *)); if (devices->fds == NULL) return ENOMEM; devices->fds[0] = open(gamepad_path, O_RDONLY); if (devices->fds[0] < 0) { return errno; } int clk_id = CLOCK_MONOTONIC; if (ret = ioctl(devices->fds[0], EVIOCSCLOCKID, &clk_id)) { dprintf("Could not set monotonic clock for the gamepad.\n"); return -ret; } devices->max_fd = devices->fds[0]; return 0; } void fini_gamepad_input_devices(input_devices_t *devices) { fini_general_input_devices(devices); } void init_revent_recording(revent_recording_t *recording) { recording->num_events = 0; recording->desc.version = 0; recording->desc.mode = INVALID_MODE; recording->events = NULL; recording->gamepad_info = NULL; init_input_devices(&recording->devices); } void fini_revent_recording(revent_recording_t *recording) { if (recording->desc.mode == GENERAL_MODE) { fini_general_input_devices(&recording->devices); } else if (recording->desc.mode == GAMEPAD_MODE) { fini_gamepad_input_devices(&recording->devices); free(recording->gamepad_info); } else { // We're finalizing the recording so at this point, // we don't care. } if (recording->num_events) { free(recording->events); } recording->num_events = 0; recording->desc.version = 0; recording->desc.mode = INVALID_MODE; } void open_general_input_devices_for_playback_or_die(input_devices_t *devices) { int i, ret; devices->fds = malloc(sizeof(int) * devices->num); if (devices->fds == NULL) die("Could not allocate file descriptor array: %s", strerror(ENOMEM)); for (i = 0; i < devices->num; i++) { ret = open(devices->paths[i], O_WRONLY | O_NDELAY); if (ret < 0) { die("Could not open \"%s\" for writing: %s", devices->paths[i], strerror(errno)); } devices->fds[i] = ret; if (devices->fds[i] > devices->max_fd) devices->max_fd = devices->fds[i]; dprintf("Opened %s\n", devices->paths[i]); } } int create_replay_device_or_die(const device_info_t *info) { int i; int fd = open("/dev/uinput", O_WRONLY | O_NONBLOCK); if (fd < 0) { if (errno == ENOENT) { die("uinput not supported by the kernel (is the module installed?)"); } else if (errno == EACCES) { die("Cannot access \"/dev/uinput\" (try re-running as root)"); } else { die("Could not open \"/dev/uinput\" for writing: %s", strerror(errno)); } } struct uinput_user_dev uidev; memset(&uidev, 0, sizeof(uidev)); snprintf(uidev.name, UINPUT_MAX_NAME_SIZE, "revent-replay %s", info->name); uidev.id.bustype = BUS_USB; uidev.id.vendor = info->id.vendor; uidev.id.product = info->id.product; uidev.id.version = info->id.version; set_evbit(fd, EV_SYN); set_evbit(fd, EV_KEY); for (i = 0; i < KEY_MAX; i++) { if (test_bit(info->key_bits, i)) set_keybit(fd, i); } set_evbit(fd, EV_REL); for (i = 0; i < REL_MAX; i++) { if (test_bit(info->rel_bits, i)) set_relbit(fd, i); } set_evbit(fd, EV_ABS); for (i = 0; i < info->num_absinfo; i++) { int ev_code = info->absinfo[i].ev_code; set_absbit(fd, ev_code); uidev.absmin[ev_code] = info->absinfo[i].absinfo.minimum; uidev.absmax[ev_code] = info->absinfo[i].absinfo.maximum; uidev.absfuzz[ev_code] = info->absinfo[i].absinfo.fuzz; 
        uidev.absflat[ev_code] = info->absinfo[i].absinfo.flat;
    }

    if (write(fd, &uidev, sizeof(uidev)) < sizeof(uidev)) {
        die("Could not write absinfo: %s", strerror(errno));
    }
    if(ioctl(fd, UI_DEV_CREATE) < 0)
        die("Could not create replay device: %s", strerror(errno));

    // wait for the new device to be recognised by the system
    sleep(3);

    return fd;
}

inline void read_revent_recording_or_die(const char *filepath, revent_recording_t *recording)
{
    int ret;
    FILE *fin;
    uint64_t i;
    off_t fsize;

    ret = open_revent_recording(filepath, &recording->desc, &fin);
    if (ret) {
        if (ret == EINVAL) {
            die("%s does not appear to be an revent recording", filepath);
        } else if (ret == EPROTO) {
            die("%s contains recording for unsupported version \"%u\"; max supported version is \"%u\"",
                filepath, recording->desc.version, FORMAT_VERSION);
        } else {
            die("%s revent recording appears to be corrupted", filepath);
        }
    }

    if (recording->desc.mode == GENERAL_MODE) {
        ret = read_general_input_devices(&recording->devices, fin);
        if (ret) {
            die("Could not read devices: %s", strerror(ret));
        }
        recording->gamepad_info = NULL;
    } else if (recording->desc.mode == GAMEPAD_MODE) {
        recording->gamepad_info = malloc(sizeof(device_info_t));
        if (recording->gamepad_info == NULL)
            die("Could not allocate gamepad info buffer: %s", strerror(ENOMEM));
        ret = read_device_info(fin, recording->gamepad_info);
        if (ret)
            die("Could not read gamepad info: %s", strerror(ret));
    } else {
        die("Unexpected recording mode: %d", recording->desc.mode);
    }

    if (recording->desc.version > 1) {
        ret = fread(&recording->num_events, sizeof(uint64_t), 1, fin);
        if (ret < 1)
            die("Could not read the number of recorded events");
        if (recording->desc.version > 2) {
            ret = read_record_timestamps(fin, recording);
            if (ret)
                die("Could not read recording timestamps.");
        }

        recording->events = malloc(sizeof(replay_event_t) * recording->num_events);
        if (recording->events == NULL)
            die("Not enough memory to allocate replay buffer");

        // start/end times tracking for recording as a whole was added in version 3
        // of recording format; for earlier recordings, use timestamps of the first and
        // last events.
        read_replay_event(fin, &recording->events[0]);
        if (recording->desc.version <= 2) {
            recording->start_time.tv_sec = recording->events[0].event.time.tv_sec;
            recording->start_time.tv_usec = recording->events[0].event.time.tv_usec;
        }
        for(i=1; i < recording->num_events; i++) {
            read_replay_event(fin, &recording->events[i]);
        }
        if (recording->desc.version <= 2) {
            // after the loop above, i == num_events, so index the last event read
            recording->end_time.tv_sec = recording->events[i - 1].event.time.tv_sec;
            recording->end_time.tv_usec = recording->events[i - 1].event.time.tv_usec;
        }
    } else {  // backwards compatibility
        /* Prior to version 2, the total number of recorded events was not being
         * written as part of the recording. We will use the size of the file on
         * disk to estimate the recording buffer size and keep reading the events
         * until EOF, keeping track of how many we read so that the total can
         * then be updated. The format of the events is also different -- it
         * featured a larger device ID and unnecessary padding.
         */
        fsize = get_file_size(filepath);
        recording->events = malloc((size_t)fsize);
        i = 0;
        // Safely get file descriptor for fin, by flushing first.
        fflush(fin);
        while (1) {
            ret = read_legacy_replay_event(fileno(fin), &recording->events[i]);
            if (ret == EOF) {
                break;
            } else if (ret) {
                die("error reading events: %s", strerror(ret));
            }
            i++;
        }
        recording->num_events = i;
    }
    fclose(fin);
}

void open_gamepad_input_devices_for_playback_or_die(input_devices_t *devices,
                                                    const device_info_t *info)
{
    int fd = create_replay_device_or_die(info);
    devices->num = 1;
    devices->fds = malloc(sizeof(int));
    if (devices->fds == NULL)
        die("Could not create replay devices: %s", strerror(ENOMEM));
    devices->fds[0] = fd;
    devices->max_fd = fd;
}

//Used to exit program properly on termination
static volatile int EXIT = 0;
void exitHandler(int z) {
    EXIT = 1;
}

void record(const char *filepath, int delay, recording_mode_t mode)
{
    int ret;
    struct timespec start_time, end_time;
    FILE *fout = init_recording(filepath, mode);
    if (fout == NULL)
        die("Could not create recording \"%s\": %s", filepath, strerror(errno));

    input_devices_t devices;
    init_input_devices(&devices);
    if (mode == GENERAL_MODE) {
        ret = init_general_input_devices(&devices);
        if (ret)
            die("Could not initialize input devices: %s", strerror(ret));
        ret = write_general_input_devices(&devices, fout);
        if (ret)
            die("Could not record input devices: %s", strerror(ret));
    } else if (mode == GAMEPAD_MODE) {
        device_info_t info;
        ret = init_gamepad_input_devices(&devices, &info);
        if (ret == ENOMEDIUM) {
            die("There does not appear to be a gamepad connected");
        } else if (ret) {
            die("Problem initializing gamepad device: %s", strerror(ret));
        }
        ret = write_device_info(fout, &info);
        if (ret)
            die("Problem writing gamepad info: %s", strerror(ret));
    } else {
        fclose(fout);
        die("Invalid recording mode specified");
    }

    sigset_t old_sigset;
    sigemptyset(&old_sigset);
    block_sigterm(&old_sigset);

    // Write zero as a placeholder and remember the position in the file
    // stream, so that it may be updated at the end with the actual event
    // count. Reserving space for five uint64_t's -- the number of events
    // plus the start and end timestamps.
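    // Placeholder layout (matching what record() writes back on completion):
    //   uint64_t num_events;
    //   uint64_t start_sec, start_usec;
    //   uint64_t end_sec, end_usec;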
    uint64_t event_count = 0;
    long size_pos = ftell(fout);
    ret = fwrite(&event_count, sizeof(uint64_t), 5, fout);
    if (ret < 1)
        die("Could not initialise event count: %s", strerror(errno));

    char padding[EVENT_PADDING_SIZE];
    bzero(padding, EVENT_PADDING_SIZE);

    fd_set readfds;
    struct timespec tout;
    replay_event_t rev;
    int32_t maxfd = 0;
    int32_t keydev = 0;
    int i;

    printf("recording...\n");
    errno = 0;
    signal(SIGINT, exitHandler);
    clock_gettime(CLOCK_MONOTONIC, &start_time);
    while(1) {
        FD_ZERO(&readfds);
        FD_SET(STDIN_FILENO, &readfds);
        for (i=0; i < devices.num; i++)
            FD_SET(devices.fds[i], &readfds);

        /* wait for input */
        tout.tv_sec = delay;
        tout.tv_nsec = 0;
        ret = pselect(devices.max_fd + 1, &readfds, NULL, NULL, &tout, &old_sigset);
        if (EXIT){
            break;
        }
        if (errno == EINTR){
            break;
        }
        if (!ret){
            break;
        }

        if (wait_for_stdin && FD_ISSET(STDIN_FILENO, &readfds)) {
            // in this case the key down for the return key will be recorded,
            // so we also need to write the corresponding key up
            memset(&rev, 0, sizeof(rev));
            rev.dev_idx = keydev;
            rev.event.type = EV_KEY;
            rev.event.code = KEY_ENTER;
            rev.event.value = 0;
            gettimeofday(&rev.event.time, NULL);
            write_replay_event(fout, &rev);

            // syn
            memset(&rev, 0, sizeof(rev));
            rev.dev_idx = keydev;
            rev.event.type = EV_SYN;
            rev.event.code = 0;
            rev.event.value = 0;
            gettimeofday(&rev.event.time, NULL);
            write_replay_event(fout, &rev);

            dprintf("added fake return exiting...\n");
            break;
        }

        for (i = 0; i < devices.num; i++) {
            if (FD_ISSET(devices.fds[i], &readfds)) {
                dprintf("got event from %s\n", devices.paths[i]);
                memset(&rev, 0, sizeof(rev));
                rev.dev_idx = i;
                ret = read(devices.fds[i], (void *)&rev.event, sizeof(rev.event));
                dprintf("%d event: type %d code %d value %d\n",
                        (unsigned int)ret, rev.event.type, rev.event.code, rev.event.value);
                if (rev.event.type == EV_KEY && rev.event.code == KEY_ENTER && rev.event.value == 1)
                    keydev = i;
                write_replay_event(fout, &rev);
                event_count++;
            }
        }
    }
    clock_gettime(CLOCK_MONOTONIC, &end_time);

    dprintf("Writing event count...\n");
    if ((ret = fseek(fout, size_pos, SEEK_SET)) == -1)
        die("Could not write event count: %s", strerror(errno));
    ret = fwrite(&event_count, sizeof(uint64_t), 1, fout);
    if (ret < 1)
        die("Could not write event count: %s", strerror(errno));

    dprintf("Writing recording timestamps...\n");
    uint64_t secs, usecs;
    secs = start_time.tv_sec;
    fwrite(&secs, sizeof(uint64_t), 1, fout);
    usecs = start_time.tv_nsec / 1000;
    fwrite(&usecs, sizeof(uint64_t), 1, fout);
    secs = end_time.tv_sec;
    fwrite(&secs, sizeof(uint64_t), 1, fout);
    usecs = end_time.tv_nsec / 1000;
    ret = fwrite(&usecs, sizeof(uint64_t), 1, fout);
    if (ret < 1)
        die("Could not write recording timestamps: %s\n", strerror(errno));
    fclose(fout);
    dprintf("Recording complete.\n");

    if (mode == GENERAL_MODE) {
        fini_general_input_devices(&devices);
    } else if (mode == GAMEPAD_MODE) {
        fini_gamepad_input_devices(&devices);
    } else {
        // Should never get here, as we would have failed at the beginning
        die("Unexpected mode on finish");
    }
}

void dump(const char *filepath)
{
    int i, ret = 0;
    revent_recording_t recording;
    init_revent_recording(&recording);
    read_revent_recording_or_die(filepath, &recording);

    printf("recording version: %u\n", recording.desc.version);
    printf("recording type: %i\n", recording.desc.mode);
    printf("number of recorded events: %lu\n", recording.num_events);
    printf("start time: %ld.%06ld \n", recording.start_time.tv_sec, recording.start_time.tv_usec);
    printf("end time: %ld.%06ld \n", recording.end_time.tv_sec, recording.end_time.tv_usec);
    printf("\n");

    if (recording.desc.mode == GENERAL_MODE) {
printf("devices:\n"); for (i = 0; i < recording.devices.num; i++) { printf("%2i: %s\n", i, recording.devices.paths[i]); } } else if (recording.desc.mode == GAMEPAD_MODE) { print_device_info(recording.gamepad_info); } else { die("Unexpected recording type: %d", recording.desc.mode); } printf("\nevents:\n"); for (i =0; i < recording.num_events; i++) { printf("%ld.%06ld dev: %d type: %d code: %d value %d\n", recording.events[i].event.time.tv_sec, recording.events[i].event.time.tv_usec, recording.events[i].dev_idx, recording.events[i].event.type, recording.events[i].event.code, recording.events[i].event.value ); } fini_revent_recording(&recording); } void replay(const char *filepath) { revent_recording_t recording; init_revent_recording(&recording); read_revent_recording_or_die(filepath, &recording); switch (recording.desc.mode) { case GENERAL_MODE: dprintf("Opening input devices for playback\n"); open_general_input_devices_for_playback_or_die(&recording.devices); break; case GAMEPAD_MODE: dprintf("Creating gamepad playback device\n"); open_gamepad_input_devices_for_playback_or_die(&recording.devices, recording.gamepad_info); break; default: die("Unexpected recording mod: %d", recording.desc.mode); } dprintf("Adjusting timestamps\n"); adjust_timestamps(&recording); struct timeval start_time, now, desired_time, last_event_delta, delta; bzero(&last_event_delta, sizeof(struct timeval)); gettimeofday(&start_time, NULL); int ret; uint64_t i = 0; dprintf("Starting payback\n"); while (i < recording.num_events) { gettimeofday(&now, NULL); timeradd(&start_time, &last_event_delta, &desired_time); if (timercmp(&desired_time, &now, >)) { timersub(&desired_time, &now, &delta); useconds_t d = (useconds_t)delta.tv_sec * 1000000 + delta.tv_usec; dprintf("now %u.%u desiredtime %u.%u sleeping %u uS\n", (unsigned int)now.tv_sec, (unsigned int)now.tv_usec, (unsigned int)desired_time.tv_sec, (unsigned int)desired_time.tv_usec, d); usleep(d); } int32_t idx = (recording.events[i]).dev_idx; struct input_event ev = (recording.events[i]).event; while(!timercmp(&ev.time, &last_event_delta, !=)) { ret = write(recording.devices.fds[idx], &ev, sizeof(ev)); if (ret != sizeof(ev)) die("Could not replay event"); dprintf("replayed event: type %d code %d value %d\n", ev.type, ev.code, ev.value); i++; if (i >= recording.num_events) { break; } idx = recording.events[i].dev_idx; ev = recording.events[i].event; } last_event_delta = ev.time; } timeradd(&start_time, &recording.end_time, &desired_time); gettimeofday(&now, NULL); if (timercmp(&desired_time, &now, >)) { timersub(&desired_time, &now, &delta); useconds_t d = (useconds_t)delta.tv_sec * 1000000 + delta.tv_usec; dprintf("now %u.%u recording end time %u.%u; sleeping %u uS\n", (unsigned int)now.tv_sec, (unsigned int)now.tv_usec, (unsigned int)desired_time.tv_sec, (unsigned int)desired_time.tv_usec, d); usleep(d); } else { dprintf("now %u.%u recording end time %u.%u; no need to sleep\n", (unsigned int)now.tv_sec, (unsigned int)now.tv_usec, (unsigned int)desired_time.tv_sec, (unsigned int)desired_time.tv_usec); } dprintf("Playback complete\n"); if (recording.desc.mode == GAMEPAD_MODE) destroy_replay_device(recording.devices.fds[0]); fini_revent_recording(&recording); } void info(void) { input_devices_t devices; init_input_devices(&devices); int ret = init_general_input_devices(&devices); if (ret) { die("Could not read input devices: %s", strerror(errno)); } int i; device_info_t info; for (i = 0; i < devices.num; i++) { ret = get_device_info(devices.fds[i], &info); if (ret) { 
printf("Could not get info for %s: %s\n", devices.paths[i], strerror(errno)); continue; } printf("DEVICE %d\n", i); printf("device path: %s\n", devices.paths[i]); printf("is gamepad: %s\n", is_gamepad(&info) ? "yes" : "no"); print_device_info(&info); printf("\n"); } fini_general_input_devices(&devices); } void usage() { printf("usage:\n revent [-h] [-v] COMMAND [OPTIONS] \n" "\n" " Options:\n" " -h print this help message and quit.\n" " -v enable verbose output.\n" "\n" " Commands:\n" " record [-t SECONDS] [-d DEVICE] FILE\n" " Record input event. stops after return on STDIN (or, optionally, \n" " a fixed delay)\n" "\n" " FILE file into which events will be recorded.\n" " -t SECONDS time, in seconds, for which to record events.\n" " if not specified, recording will continue until\n" " return key is pressed.\n" " -d DEVICE the number of the input device form which\n" " events will be recorded. If not specified, \n" " all available inputs will be used.\n" " -s Recording will not be stopped if there is \n" " input on STDIN.\n" " -g Record in \"gamepad\" mode. A gamepad must be \n" " connected to the device. The recording will only\n" " be done for the gamepad and other input devices\n" " will not be recorded. In addition to the input\n" " events, the information about the gamepad will\n" " also be stored in the recording. When this\n" " recording is played back, revent will first\n" " create a virtual gamepad device based on the\n" " stored info and the event will be played back\n" " into it. This type of recording should be more\n" " portable across different devices.\n" "\n" " replay FILE\n" " replays previously recorded events from the specified file.\n" "\n" " FILE file into which events will be recorded.\n" "\n" " dump FILE\n" " dumps the contents of the specified event log to STDOUT in\n" " human-readable form.\n" "\n" " FILE event log which will be dumped.\n" "\n" " info\n" " shows info about each event char device\n" "\n" ); } void revent_args_init(revent_args_t **rargs, int argc, char** argv) { *rargs = malloc(sizeof(revent_args_t)); revent_args_t *revent_args = *rargs; revent_args->command = INVALID_COMMAND; revent_args->mode = GENERAL_MODE; revent_args->record_time = INT_MAX; revent_args->device_number = -1; revent_args->file = NULL; int opt; while ((opt = getopt(argc, argv, "hgt:d:vs")) != -1) { switch (opt) { case 'h': usage(); exit(0); break; case 'g': revent_args->mode = GAMEPAD_MODE; break; case 't': if (is_numeric(optarg)) { revent_args->record_time = atoi(optarg); dprintf("timeout: %d\n", revent_args->record_time); } else { die("-t parameter must be numeric; got %s.", optarg); } break; case 'd': if (is_numeric(optarg)) { revent_args->device_number = atoi(optarg); dprintf("device: %d\n", revent_args->device_number); } else { die("-d parameter must be numeric; got %s.", optarg); } break; case 'v': verbose = TRUE; break; case 's': wait_for_stdin = FALSE; break; default: die("Unexpected option: %c", opt); } } int next_arg = optind; if (next_arg == argc) { usage(); die("Must specify a command."); } if (!strcmp(argv[next_arg], "record")) revent_args->command = RECORD_COMMAND; else if (!strcmp(argv[next_arg], "replay")) revent_args->command = REPLAY_COMMAND; else if (!strcmp(argv[next_arg], "dump")) revent_args->command = DUMP_COMMAND; else if (!strcmp(argv[next_arg], "info")) revent_args->command = INFO_COMMAND; else { usage(); die("Unknown command -- %s", argv[next_arg]); } next_arg++; if (next_arg != argc) { revent_args->file = argv[next_arg]; dprintf("file: %s\n", 
revent_args->file); next_arg++; if (next_arg != argc) { die("Trailing arguments (use -h for help)."); } } if ((revent_args->command != RECORD_COMMAND) && (revent_args->record_time != INT_MAX)) { die("-t parameter is only valid for \"record\" command."); } if ((revent_args->command != RECORD_COMMAND) && (revent_args->device_number != -1)) { die("-d parameter is only valid for \"record\" command."); } if ((revent_args->command == INFO_COMMAND) && (revent_args->file != NULL)) { die("File path cannot be specified for \"info\" command."); } if (((revent_args->command == RECORD_COMMAND) || (revent_args->command == REPLAY_COMMAND)) && (revent_args->file == NULL)) { die("Must specify a file for recording/replaying (use -h for help)."); } } int revent_args_close(revent_args_t *rargs) { free(rargs); return 0; } int main(int argc, char** argv) { revent_args_t *rargs = NULL; revent_args_init(&rargs, argc, argv); switch(rargs->command) { case RECORD_COMMAND: record(rargs->file, rargs->record_time, rargs->mode); break; case REPLAY_COMMAND: replay(rargs->file); break; case DUMP_COMMAND: dump(rargs->file); break; case INFO_COMMAND: info(); break; default: die("Unexpected revent command: %d", rargs->command); } revent_args_close(rargs); return 0; } ================================================ FILE: wa/utils/__init__.py ================================================ ================================================ FILE: wa/utils/android.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
# import logging import os from datetime import datetime from shlex import quote from devlib.utils.android import ApkInfo as _ApkInfo from wa.framework.configuration import settings from wa.utils.serializer import read_pod, write_pod, Podable from wa.utils.types import enum from wa.utils.misc import atomic_write_path LogcatLogLevel = enum(['verbose', 'debug', 'info', 'warn', 'error', 'assert'], start=2) log_level_map = ''.join(n[0].upper() for n in LogcatLogLevel.names) logcat_logger = logging.getLogger('logcat') apk_info_cache_logger = logging.getLogger('apk_info_cache') apk_info_cache = None class LogcatEvent(object): __slots__ = ['timestamp', 'pid', 'tid', 'level', 'tag', 'message'] def __init__(self, timestamp, pid, tid, level, tag, message): self.timestamp = timestamp self.pid = pid self.tid = tid self.level = level self.tag = tag self.message = message def __repr__(self): return '{} {} {} {} {}: {}'.format( self.timestamp, self.pid, self.tid, self.level.name.upper(), self.tag, self.message, ) __str__ = __repr__ class LogcatParser(object): def parse(self, filepath): with open(filepath, errors='replace') as fh: for line in fh: event = self.parse_line(line) if event: yield event def parse_line(self, line): # pylint: disable=no-self-use line = line.strip() if not line or line.startswith('-') or ': ' not in line: return None metadata, message = line.split(': ', 1) parts = metadata.split(None, 5) try: ts = ' '.join([parts.pop(0), parts.pop(0)]) timestamp = datetime.strptime(ts, '%m-%d %H:%M:%S.%f').replace(year=datetime.now().year) pid = int(parts.pop(0)) tid = int(parts.pop(0)) level = LogcatLogLevel.levels[log_level_map.index(parts.pop(0))] tag = (parts.pop(0) if parts else '').strip() except Exception as e: # pylint: disable=broad-except message = 'Invalid metadata for line:\n\t{}\n\tgot: "{}"' logcat_logger.warning(message.format(line, e)) return None return LogcatEvent(timestamp, pid, tid, level, tag, message) # pylint: disable=protected-access,attribute-defined-outside-init class ApkInfo(_ApkInfo, Podable): '''Implement ApkInfo as a Podable class.''' _pod_serialization_version = 1 @staticmethod def from_pod(pod): instance = ApkInfo() instance.path = pod['path'] instance.package = pod['package'] instance.activity = pod['activity'] instance.label = pod['label'] instance.version_name = pod['version_name'] instance.version_code = pod['version_code'] instance.native_code = pod['native_code'] instance.permissions = pod['permissions'] instance._apk_path = pod['_apk_path'] instance._activities = pod['_activities'] instance._methods = pod['_methods'] return instance def __init__(self, path=None): super().__init__(path) self._pod_version = self._pod_serialization_version def to_pod(self): pod = super().to_pod() pod['path'] = self.path pod['package'] = self.package pod['activity'] = self.activity pod['label'] = self.label pod['version_name'] = self.version_name pod['version_code'] = self.version_code pod['native_code'] = self.native_code pod['permissions'] = self.permissions pod['_apk_path'] = self._apk_path pod['_activities'] = self.activities # Force extraction pod['_methods'] = self.methods # Force extraction return pod @staticmethod def _pod_upgrade_v1(pod): pod['_pod_version'] = pod.get('_pod_version', 1) return pod class ApkInfoCache: @staticmethod def _check_env(): if not os.path.exists(settings.cache_directory): os.makedirs(settings.cache_directory) def __init__(self, path=settings.apk_info_cache_file): self._check_env() self.path = path self.last_modified = None self.cache = {} 
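# populate the in-memory cache from the on-disk file, if one already exists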
self._update_cache() def store(self, apk_info, apk_id, overwrite=True): self._update_cache() if apk_id in self.cache and not overwrite: raise ValueError('ApkInfo for {} is already in cache.'.format(apk_info.path)) self.cache[apk_id] = apk_info.to_pod() with atomic_write_path(self.path) as at_path: write_pod(self.cache, at_path) self.last_modified = os.stat(self.path) def get_info(self, key): self._update_cache() pod = self.cache.get(key) info = ApkInfo.from_pod(pod) if pod else None return info def _update_cache(self): if not os.path.exists(self.path): return if self.last_modified != os.stat(self.path): apk_info_cache_logger.debug('Updating cache {}'.format(self.path)) self.cache = read_pod(self.path) self.last_modified = os.stat(self.path) def get_cacheable_apk_info(path): # pylint: disable=global-statement global apk_info_cache if not path: return stat = os.stat(path) modified = stat.st_mtime apk_id = '{}-{}'.format(path, modified) info = apk_info_cache.get_info(apk_id) if info: msg = 'Using ApkInfo ({}) from cache'.format(info.package) else: info = ApkInfo(path) apk_info_cache.store(info, apk_id, overwrite=True) msg = 'Storing ApkInfo ({}) in cache'.format(info.package) apk_info_cache_logger.debug(msg) return info apk_info_cache = ApkInfoCache() def build_apk_launch_command(package, activity=None, apk_args=None): args_string = '' if apk_args: for k, v in apk_args.items(): if isinstance(v, str): arg = '--es' v = quote(v) elif isinstance(v, float): arg = '--ef' elif isinstance(v, bool): arg = '--ez' elif isinstance(v, int): arg = '--ei' else: raise ValueError('Unable to encode {} {}'.format(v, type(v))) args_string = '{} {} {} {}'.format(args_string, arg, k, v) if not activity: cmd = 'am start -W {} {}'.format(package, args_string) else: cmd = 'am start -W -n {}/{} {}'.format(package, activity, args_string) return cmd ================================================ FILE: wa/utils/cpustates.py ================================================ # Copyright 2015-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import os import re import logging from ctypes import c_int32 from collections import defaultdict from devlib.utils.csvutil import create_writer, csvwriter from wa.utils.trace_cmd import TraceCmdParser, trace_has_marker, TRACE_MARKER_START, TRACE_MARKER_STOP logger = logging.getLogger('cpustates') INIT_CPU_FREQ_REGEX = re.compile(r'CPU (?P<cpu>\d+) FREQUENCY: (?P<freq>\d+) kHZ') DEVLIB_CPU_FREQ_REGEX = re.compile(r'cpu_frequency(?:_devlib)?:\s+state=(?P<freq>\d+)\s+cpu_id=(?P<cpu>\d+)') class CorePowerTransitionEvent(object): kind = 'transition' __slots__ = ['timestamp', 'cpu_id', 'frequency', 'idle_state'] def __init__(self, timestamp, cpu_id, frequency=None, idle_state=None): if (frequency is None) == (idle_state is None): raise ValueError('Power transition must specify a frequency or an idle_state, but not both.') self.timestamp = timestamp self.cpu_id = cpu_id self.frequency = frequency self.idle_state = idle_state def __str__(self): return 'cpu {} @ {} -> freq: {} idle: {}'.format(self.cpu_id, self.timestamp, self.frequency, self.idle_state) def __repr__(self): return 'CPTE(c:{} t:{} f:{} i:{})'.format(self.cpu_id, self.timestamp, self.frequency, self.idle_state) class CorePowerDroppedEvents(object): kind = 'dropped_events' __slots__ = ['cpu_id'] def __init__(self, cpu_id): self.cpu_id = cpu_id def __str__(self): return 'DROPPED EVENTS on CPU{}'.format(self.cpu_id) __repr__ = __str__ class TraceMarkerEvent(object): kind = 'marker' __slots__ = ['name'] def __init__(self, name): self.name = name def __str__(self): return 'MARKER: {}'.format(self.name) class CpuPowerState(object): __slots__ = ['frequency', 'idle_state'] @property def is_idling(self): return self.idle_state is not None and self.idle_state >= 0 @property def is_active(self): return self.idle_state == -1 def __init__(self, frequency=None, idle_state=None): self.frequency = frequency self.idle_state = idle_state def __str__(self): return 'CP(f:{} i:{})'.format(self.frequency, self.idle_state) __repr__ = __str__ class SystemPowerState(object): __slots__ = ['timestamp', 'cpus'] @property def num_cores(self): return len(self.cpus) def __init__(self, num_cores, no_idle=False): self.timestamp = None self.cpus = [] idle_state = -1 if no_idle else None for _ in range(num_cores): self.cpus.append(CpuPowerState(idle_state=idle_state)) def copy(self): new = SystemPowerState(self.num_cores) new.timestamp = self.timestamp for i, c in enumerate(self.cpus): new.cpus[i].frequency = c.frequency new.cpus[i].idle_state = c.idle_state return new def __str__(self): return 'SP(t:{} Cs:{})'.format(self.timestamp, self.cpus) __repr__ = __str__ class PowerStateProcessor(object): """ This takes a stream of power transition events and yields a timeline stream of system power states.
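Illustrative usage sketch (assumes ``cpus`` is the list of CPU descriptions reported in ``TargetInfo``, and ``events`` is a transition event stream, e.g. from ``stream_cpu_power_transitions()``): ``processor = PowerStateProcessor(cpus, wait_for_marker=False)`` followed by ``for state in processor.process(events): ...``.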
""" @property def cpu_states(self): return self.power_state.cpus @property def current_time(self): return self.power_state.timestamp @current_time.setter def current_time(self, value): self.power_state.timestamp = value def __init__(self, cpus, wait_for_marker=True, no_idle=None): if no_idle is None: no_idle = not (cpus[0].cpuidle and cpus[0].cpuidle.states) self.power_state = SystemPowerState(len(cpus), no_idle=no_idle) self.requested_states = {} # cpu_id -> requeseted state self.wait_for_marker = wait_for_marker self._saw_start_marker = False self._saw_stop_marker = False self.exceptions = [] self.idle_related_cpus = build_idle_state_map(cpus) def process(self, event_stream): for event in event_stream: try: next_state = self.update_power_state(event) if self._saw_start_marker or not self.wait_for_marker: yield next_state if self._saw_stop_marker: break except Exception as e: # pylint: disable=broad-except self.exceptions.append(e) else: if self.wait_for_marker: logger.warning("Did not see a STOP marker in the trace") def update_power_state(self, event): """ Update the tracked power state based on the specified event and return updated power state. """ if event.kind == 'transition': self._process_transition(event) elif event.kind == 'dropped_events': self._process_dropped_events(event) elif event.kind == 'marker': if event.name == 'START': self._saw_start_marker = True elif event.name == 'STOP': self._saw_stop_marker = True else: raise ValueError('Unexpected event type: {}'.format(event.kind)) return self.power_state.copy() def _process_transition(self, event): self.current_time = event.timestamp if event.idle_state is None: self.cpu_states[event.cpu_id].frequency = event.frequency else: if event.idle_state == -1: self._process_idle_exit(event) else: self._process_idle_entry(event) def _process_dropped_events(self, event): self.cpu_states[event.cpu_id].frequency = None old_idle_state = self.cpu_states[event.cpu_id].idle_state self.cpu_states[event.cpu_id].idle_state = None related_ids = self.idle_related_cpus[(event.cpu_id, old_idle_state)] for rid in related_ids: self.cpu_states[rid].idle_state = None def _process_idle_entry(self, event): if self.cpu_states[event.cpu_id].is_idling: raise ValueError('Got idle state entry event for an idling core: {}'.format(event)) self.requested_states[event.cpu_id] = event.idle_state self._try_transition_to_idle_state(event.cpu_id, event.idle_state) def _process_idle_exit(self, event): if self.cpu_states[event.cpu_id].is_active: raise ValueError('Got idle state exit event for an active core: {}'.format(event)) self.requested_states.pop(event.cpu_id, None) # remove outstanding request if there is one old_state = self.cpu_states[event.cpu_id].idle_state self.cpu_states[event.cpu_id].idle_state = -1 related_ids = self.idle_related_cpus[(event.cpu_id, old_state)] if old_state is not None: new_state = old_state - 1 for rid in related_ids: if self.cpu_states[rid].idle_state > new_state: self._try_transition_to_idle_state(rid, new_state) def _try_transition_to_idle_state(self, cpu_id, idle_state): related_ids = self.idle_related_cpus[(cpu_id, idle_state)] # Tristate: True - can transition, False - can't transition, # None - unknown idle state on at least one related cpu transition_check = self._can_enter_state(related_ids, idle_state) if transition_check is None: # Unknown state on a related cpu means we're not sure whether we're # entering requested state or a shallower one self.cpu_states[cpu_id].idle_state = None return # Keep trying shallower states until 
all related while not self._can_enter_state(related_ids, idle_state): idle_state -= 1 related_ids = self.idle_related_cpus[(cpu_id, idle_state)] self.cpu_states[cpu_id].idle_state = idle_state for rid in related_ids: self.cpu_states[rid].idle_state = idle_state def _can_enter_state(self, related_ids, state): """ This is a tri-state check. Returns ``True`` if related cpu states allow transition into this state, ``False`` if related cpu states don't allow transition into this state, and ``None`` if at least one of the related cpus is in an unknown state (so the decision of whether a transition is possible cannot be made). """ for rid in related_ids: rid_requested_state = self.requested_states.get(rid, None) rid_current_state = self.cpu_states[rid].idle_state if rid_current_state is None: return None if rid_current_state < state: if rid_requested_state is None or rid_requested_state < state: return False return True def stream_cpu_power_transitions(events): for event in events: if event.name == 'cpu_idle': state = c_int32(event.state).value yield CorePowerTransitionEvent(event.timestamp, event.cpu_id, idle_state=state) elif event.name == 'cpu_frequency': yield CorePowerTransitionEvent(event.timestamp, event.cpu_id, frequency=event.state) elif event.name == 'DROPPED EVENTS DETECTED': yield CorePowerDroppedEvents(event.cpu_id) elif event.name == 'print': if TRACE_MARKER_START in event.text: yield TraceMarkerEvent('START') elif TRACE_MARKER_STOP in event.text: yield TraceMarkerEvent('STOP') else: if 'cpu_frequency' in event.text: match = DEVLIB_CPU_FREQ_REGEX.search(event.text) else: match = INIT_CPU_FREQ_REGEX.search(event.text) if match: yield CorePowerTransitionEvent(event.timestamp, int(match.group('cpu')), frequency=int(match.group('freq'))) def gather_core_states(system_state_stream, freq_dependent_idle_states=None): # NOQA if freq_dependent_idle_states is None: freq_dependent_idle_states = [] for system_state in system_state_stream: core_states = [] for cpu in system_state.cpus: if cpu.idle_state == -1: core_states.append((-1, cpu.frequency)) elif cpu.idle_state in freq_dependent_idle_states: if cpu.frequency is not None: core_states.append((cpu.idle_state, cpu.frequency)) else: core_states.append((None, None)) else: core_states.append((cpu.idle_state, None)) yield (system_state.timestamp, core_states) def record_state_transitions(reporter, stream): for event in stream: if event.kind == 'transition': reporter.record_transition(event) yield event class PowerStateTransitions(object): name = 'transitions-timeline' def __init__(self, output_directory): self.filepath = os.path.join(output_directory, 'state-transitions-timeline.csv') self.writer, self._wfh = create_writer(self.filepath) headers = ['timestamp', 'cpu_id', 'frequency', 'idle_state'] self.writer.writerow(headers) def update(self, timestamp, core_states): # NOQA # Just recording transitions, not doing anything # with states. 
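# (update() exists only to satisfy the common reporter interface used by report_power_stats(), so it is a deliberate no-op here)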
pass def record_transition(self, transition): row = [transition.timestamp, transition.cpu_id, transition.frequency, transition.idle_state] self.writer.writerow(row) def report(self): return self def write(self): self._wfh.close() class PowerStateTimeline(object): name = 'state-timeline' def __init__(self, output_directory, cpus): self.filepath = os.path.join(output_directory, 'power-state-timeline.csv') self.idle_state_names = {cpu.id: [s.name for s in cpu.cpuidle.states] for cpu in cpus} self.writer, self._wfh = create_writer(self.filepath) headers = ['ts'] + ['{} CPU{}'.format(cpu.name, cpu.id) for cpu in cpus] self.writer.writerow(headers) def update(self, timestamp, core_states): # NOQA row = [timestamp] for cpu_idx, (idle_state, frequency) in enumerate(core_states): if frequency is None: if idle_state == -1: row.append('Running (unknown kHz)') elif idle_state is None: row.append('unknown') elif not self.idle_state_names[cpu_idx]: row.append('idle[{}]'.format(idle_state)) else: row.append(self.idle_state_names[cpu_idx][idle_state]) else: # frequency is not None if idle_state == -1: row.append(frequency) elif idle_state is None: row.append('unknown') else: row.append('{} ({})'.format(self.idle_state_names[cpu_idx][idle_state], frequency)) self.writer.writerow(row) def report(self): return self def write(self): self._wfh.close() class ParallelStats(object): def __init__(self, output_directory, cpus, use_ratios=False): self.filepath = os.path.join(output_directory, 'parallel-stats.csv') self.clusters = defaultdict(set) self.use_ratios = use_ratios clusters = [] for cpu in cpus: if cpu.cpufreq.related_cpus not in clusters: clusters.append(cpu.cpufreq.related_cpus) for i, clust in enumerate(clusters): self.clusters[str(i)] = set(clust) self.clusters['all'] = {cpu.id for cpu in cpus} self.first_timestamp = None self.last_timestamp = None self.previous_states = None self.parallel_times = defaultdict(lambda: defaultdict(int)) self.running_times = defaultdict(int) def update(self, timestamp, core_states): if self.last_timestamp is not None: delta = timestamp - self.last_timestamp active_cores = [i for i, c in enumerate(self.previous_states) if c and c[0] == -1] for cluster, cluster_cores in self.clusters.items(): clust_active_cores = len(cluster_cores.intersection(active_cores)) self.parallel_times[cluster][clust_active_cores] += delta if clust_active_cores: self.running_times[cluster] += delta else: # initial update self.first_timestamp = timestamp self.last_timestamp = timestamp self.previous_states = core_states def report(self): # NOQA if self.last_timestamp is None: return None report = ParallelReport(self.filepath) total_time = self.last_timestamp - self.first_timestamp for cluster in sorted(self.parallel_times): running_time = self.running_times[cluster] for n in range(len(self.clusters[cluster]) + 1): time = self.parallel_times[cluster][n] time_pc = time / total_time if not self.use_ratios: time_pc *= 100 if n: if running_time: running_time_pc = time / running_time else: running_time_pc = 0 if not self.use_ratios: running_time_pc *= 100 else: running_time_pc = 0 precision = 3 if self.use_ratios else 1 fmt = '{{:.{}f}}'.format(precision) report.add([cluster, n, fmt.format(time), fmt.format(time_pc), fmt.format(running_time_pc), ]) return report class ParallelReport(object): name = 'parallel-stats' def __init__(self, filepath): self.filepath = filepath self.values = [] def add(self, value): self.values.append(value) def write(self): with csvwriter(self.filepath) as writer: 
writer.writerow(['cluster', 'number_of_cores', 'total_time', '%time', '%running_time']) writer.writerows(self.values) class PowerStateStats(object): def __init__(self, output_directory, cpus, use_ratios=False): self.filepath = os.path.join(output_directory, 'power-state-stats.csv') self.core_names = [cpu.name for cpu in cpus] self.idle_state_names = {cpu.id: [s.name for s in cpu.cpuidle.states] for cpu in cpus} self.use_ratios = use_ratios self.first_timestamp = None self.last_timestamp = None self.previous_states = None self.cpu_states = defaultdict(lambda: defaultdict(int)) def update(self, timestamp, core_states): # NOQA if self.last_timestamp is not None: delta = timestamp - self.last_timestamp for cpu, (idle, freq) in enumerate(self.previous_states): if idle == -1: if freq is not None: state = '{:07}KHz'.format(freq) else: state = 'Running (unknown KHz)' elif freq: state = '{}-{:07}KHz'.format(self.idle_state_names[cpu][idle], freq) elif idle is not None and self.idle_state_names[cpu]: state = self.idle_state_names[cpu][idle] else: state = 'unknown' self.cpu_states[cpu][state] += delta else: # initial update self.first_timestamp = timestamp self.last_timestamp = timestamp self.previous_states = core_states def report(self): if self.last_timestamp is None: return None total_time = self.last_timestamp - self.first_timestamp state_stats = defaultdict(lambda: [None] * len(self.core_names)) for cpu, states in self.cpu_states.items(): for state in states: time = states[state] time_pc = time / total_time if not self.use_ratios: time_pc *= 100 state_stats[state][cpu] = time_pc precision = 3 if self.use_ratios else 1 return PowerStateStatsReport(self.filepath, state_stats, self.core_names, precision) class PowerStateStatsReport(object): name = 'power-state-stats' def __init__(self, filepath, state_stats, core_names, precision=2): self.filepath = filepath self.state_stats = state_stats self.core_names = core_names self.precision = precision def write(self): with csvwriter(self.filepath) as writer: headers = ['state'] + ['{} CPU{}'.format(c, i) for i, c in enumerate(self.core_names)] writer.writerow(headers) for state in sorted(self.state_stats): stats = self.state_stats[state] fmt = '{{:.{}f}}'.format(self.precision) writer.writerow([state] + [fmt.format(s if s is not None else 0) for s in stats]) class CpuUtilizationTimeline(object): name = 'utilization-timeline' def __init__(self, output_directory, cpus): self.filepath = os.path.join(output_directory, 'utilization-timeline.csv') self.writer, self._wfh = create_writer(self.filepath) headers = ['ts'] + ['{} CPU{}'.format(cpu.name, cpu.id) for cpu in cpus] self.writer.writerow(headers) self._max_freq_list = [cpu.cpufreq.available_frequencies[-1] for cpu in cpus if cpu.cpufreq.available_frequencies] def update(self, timestamp, core_states): # NOQA row = [timestamp] for core, [_, frequency] in enumerate(core_states): if frequency is not None and core in self._max_freq_list: frequency /= float(self._max_freq_list[core]) row.append(frequency) else: row.append(None) self.writer.writerow(row) def report(self): return self def write(self): self._wfh.close() def build_idle_state_map(cpus): idle_state_map = defaultdict(list) for cpu_idx, cpu in enumerate(cpus): related_cpus = set(cpu.cpufreq.related_cpus) - set([cpu_idx]) first_cluster_state = cpu.cpuidle.num_states - 1 for state_idx, _ in enumerate(cpu.cpuidle.states): if state_idx < first_cluster_state: idle_state_map[(cpu_idx, state_idx)] = [] else: idle_state_map[(cpu_idx, state_idx)] = 
list(related_cpus) return idle_state_map def report_power_stats(trace_file, cpus, output_basedir, use_ratios=False, no_idle=None, # pylint: disable=too-many-locals split_wfi_states=False): """ Process trace-cmd output to generate timelines and statistics of CPU power state (a.k.a. P- and C-state) transitions in the trace. The results will be written into a subdirectory called "power-states" under the specified ``output_basedir``. :param trace_file: trace-cmd's text trace to process. :param cpus: A list of ``CpuInfo`` objects describing a target's CPUs. These are typically reported as part of ``TargetInfo`` in WA output. :param output_basedir: Base location for the output. This directory must exist and must not contain a directory or file named ``"power-states"``. :param use_ratios: By default, stats will be reported as percentages. Set this to ``True`` to report stats as decimals in the ``0 <= value <= 1`` range instead. :param no_idle: ``False`` if cpuidle and at least one idle state per CPU are enabled, should be ``True`` otherwise. This influences the assumptions about CPUs' initial states. If not explicitly set, the value for this will be guessed based on whether cpuidle states are present in the first ``CpuInfo``. The output directory will contain the following files: power-state-stats.csv Power state residency statistics for each CPU. Shows the percentage of time a CPU has spent in each of its available power states. parallel-stats.csv Parallel execution stats for each CPU cluster, and combined stats for the whole system. power-state-timeline.csv Timeline of CPU power states. Shows which power state each CPU is in at a point in time. state-transitions-timeline.csv Timeline of CPU power state transitions. Each entry shows a CPU's transition from one power state to another. utilization-timeline.csv Timeline of CPU utilizations. .. note:: Timeline entries aren't at regular intervals, but at times of power transition events. Stats are generated by assembling a pipeline consisting of the following stages: 1. Parse trace into trace events 2. Filter trace events into power state transition events 3. Record power state transitions 4. Convert transitions into power states. 5. Collapse the power states into timestamped ``(C state, P state)`` tuples for each cpu. 6. Update reporters/stats generators with cpu states. """ output_directory = os.path.join(output_basedir, 'power-states') if not os.path.isdir(output_directory): os.mkdir(output_directory) freq_dependent_idle_states = [] if split_wfi_states: freq_dependent_idle_states = [0] # init trace, processor, and reporters # note: filter_markers is False here, even though we *will* filter by them. The # reason for this is that we want to observe events before the start # marker in order to establish the initial power states.
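# Marker filtering is instead handled by PowerStateProcessor, which only starts yielding states once the START marker is seen and stops at the STOP marker.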
parser = TraceCmdParser(filter_markers=False, events=['cpu_idle', 'cpu_frequency', 'print']) ps_processor = PowerStateProcessor(cpus, wait_for_marker=trace_has_marker(trace_file), no_idle=no_idle) transitions_reporter = PowerStateTransitions(output_directory) reporters = [ ParallelStats(output_directory, cpus, use_ratios), PowerStateStats(output_directory, cpus, use_ratios), PowerStateTimeline(output_directory, cpus), CpuUtilizationTimeline(output_directory, cpus), transitions_reporter, ] # assemble the pipeline event_stream = parser.parse(trace_file) transition_stream = stream_cpu_power_transitions(event_stream) recorded_trans_stream = record_state_transitions(transitions_reporter, transition_stream) power_state_stream = ps_processor.process(recorded_trans_stream) core_state_stream = gather_core_states(power_state_stream, freq_dependent_idle_states) # execute the pipeline for timestamp, states in core_state_stream: for reporter in reporters: reporter.update(timestamp, states) # report any issues encountered while executing the pipeline if ps_processor.exceptions: logger.warning('There were errors while processing trace:') for e in ps_processor.exceptions: logger.warning(str(e)) # generate reports reports = {} for reporter in reporters: report = reporter.report() report.write() reports[report.name] = report return reports ================================================ FILE: wa/utils/diff.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import re import logging from builtins import zip # pylint: disable=redefined-builtin from future.moves.itertools import zip_longest from wa.utils.misc import diff_tokens, write_table from wa.utils.misc import ensure_file_directory_exists as _f logger = logging.getLogger('diff') def diff_interrupt_files(before, after, result): # pylint: disable=R0914 output_lines = [] with open(before) as bfh: with open(after) as ofh: for bline, aline in zip(bfh, ofh): bchunks = bline.strip().split() while True: achunks = aline.strip().split() if achunks[0] == bchunks[0]: diffchunks = [''] diffchunks.append(achunks[0]) diffchunks.extend([diff_tokens(b, a) for b, a in zip(bchunks[1:], achunks[1:])]) output_lines.append(diffchunks) break else: # new category appeared in the after file diffchunks = ['>'] + achunks output_lines.append(diffchunks) try: aline = next(ofh) except StopIteration: break # Offset heading columns by one to allow for row labels on subsequent # lines. output_lines[0].insert(0, '') # Any "columns" that do not have headings in the first row are not actually # columns -- they are a single column where space-separated words got # split. Merge them back together to prevent them from being # column-aligned by write_table.
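# e.g. (illustrative) with four header columns, ['', '4:', '100', '200', 'GIC', '27', 'Edge', 'eth0'] is merged into ['', '4:', '100', '200', 'GIC 27 Edge eth0']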
table_rows = [output_lines[0]] num_cols = len(output_lines[0]) for row in output_lines[1:]: table_row = row[:num_cols] table_row.append(' '.join(row[num_cols:])) table_rows.append(table_row) with open(result, 'w') as wfh: write_table(table_rows, wfh) def diff_sysfs_dirs(before, after, result): # pylint: disable=R0914 before_files = [] for root, _, files in os.walk(before): before_files.extend([os.path.join(root, f) for f in files]) before_files = list(filter(os.path.isfile, before_files)) files = [os.path.relpath(f, before) for f in before_files] after_files = [os.path.join(after, f) for f in files] diff_files = [os.path.join(result, f) for f in files] for bfile, afile, dfile in zip(before_files, after_files, diff_files): if not os.path.isfile(afile): logger.debug('sysfs_diff: {} does not exist or is not a file'.format(afile)) continue with open(bfile) as bfh, open(afile) as afh: # pylint: disable=C0321 with open(_f(dfile), 'w') as dfh: for i, (bline, aline) in enumerate(zip_longest(bfh, afh), 1): if aline is None: logger.debug('Lines missing from {}'.format(afile)) break bchunks = re.split(r'(\W+)', bline) achunks = re.split(r'(\W+)', aline) if len(bchunks) != len(achunks): logger.debug('Token length mismatch in {} on line {}'.format(bfile, i)) dfh.write('xxx ' + bline) continue if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2) and (bchunks[0] == achunks[0])): # if there are only two columns and the first column is the # same, assume it's a "header" column and do not diff it. dchunks = [bchunks[0]] + [diff_tokens(b, a) for b, a in zip(bchunks[1:], achunks[1:])] else: dchunks = [diff_tokens(b, a) for b, a in zip(bchunks, achunks)] dfh.write(''.join(dchunks)) ================================================ FILE: wa/utils/doc.py ================================================ # Copyright 2014-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Utilities for working with and formatting documentation. """ import os import re import inspect from itertools import cycle USER_HOME = os.path.expanduser('~') BULLET_CHARS = '-*' def get_summary(aclass): """ Returns the summary description for an extension class. The summary is the first paragraph (separated by blank line) of the description taken either from the ``description`` attribute of the class, or if that is not present, from the class' docstring. """ return get_description(aclass).split('\n\n')[0] def get_description(aclass): """ Return the description of the specified extension class. The description is taken either from ``description`` attribute of the class or its docstring. """ if hasattr(aclass, 'description') and aclass.description: return inspect.cleandoc(aclass.description) if aclass.__doc__: return inspect.getdoc(aclass) else: return 'no documentation found for {}'.format(aclass.__name__) def get_type_name(obj): """Returns the name of the type object or function specified.
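For example, ``get_type_name(int)`` returns ``'int'``.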
In case of a lambda, the definition is returned with the parameter replaced by "value".""" match = re.search(r"<(type|class|function) '?(.*?)'?>", str(obj)) if isinstance(obj, tuple): name = obj[1] elif match.group(1) == 'function': text = str(obj) name = text.split()[1] if name.endswith('<lambda>'): source = inspect.getsource(obj).strip().replace('\n', ' ') match = re.search(r'lambda\s+(\w+)\s*:\s*(.*?)\s*[\n,]', source) if not match: raise ValueError('could not get name for {}'.format(obj)) name = match.group(2).replace(match.group(1), 'value') else: name = match.group(2) if '.' in name: name = name.split('.')[-1] return name def count_leading_spaces(text): """ Counts the number of leading space characters in a string. TODO: may need to update this to handle whitespace, but shouldn't be necessary as there should be no tabs in Python source. """ nspaces = 0 for c in text: if c == ' ': nspaces += 1 else: break return nspaces def format_column(text, width): """ Formats text into a column of specified width. If a line is too long, it will be broken on a word boundary. The new lines will have the same number of leading spaces as the original line. Note: this will not attempt to join up lines that are too short. """ formatted = [] for line in text.split('\n'): line_len = len(line) if line_len <= width: formatted.append(line) else: words = line.split(' ') new_line = words.pop(0) while words: next_word = words.pop(0) if (len(new_line) + len(next_word) + 1) < width: new_line += ' ' + next_word else: formatted.append(new_line) new_line = ' ' * count_leading_spaces(new_line) + next_word formatted.append(new_line) return '\n'.join(formatted) def format_bullets(text, width, char='-', shift=3, outchar=None): """ Formats text into a bulleted list. Assumes each line of input that starts with ``char`` (possibly preceded by whitespace) is a new bullet point. Note: leading whitespace in the input will *not* be preserved. Instead, it will be determined by ``shift`` parameter. :text: the text to be formatted :width: format width (note: must be at least ``shift`` + 4). :char: character that indicates a new bullet point in the input text. :shift: How far bulleted entries will be indented. This indicates the indentation level of the bullet point. Text indentation level will be ``shift`` + 3. :outchar: character that will be used to mark bullet points in the output. If left as ``None``, ``char`` will be used. """ bullet_lines = [] output = '' def __process_bullet(bullet_lines): if bullet_lines: bullet = format_paragraph(indent(' '.join(bullet_lines), shift + 2), width) bullet = bullet[:3] + outchar + bullet[4:] del bullet_lines[:] return bullet + '\n' else: return '' if outchar is None: outchar = char for line in text.split('\n'): line = line.strip() if line.startswith(char): # new bullet output += __process_bullet(bullet_lines) line = line[1:].strip() bullet_lines.append(line) output += __process_bullet(bullet_lines) return output def format_simple_table(rows, headers=None, align='>', show_borders=True, borderchar='='): # pylint: disable=R0914 """Formats a simple table.""" if not rows: return '' rows = [list(map(str, r)) for r in rows] num_cols = len(rows[0]) # cycle specified alignments until we have num_cols of them. This is # consistent with how such cases are handled in R, pandas, etc.
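# e.g. align='<>' over three columns cycles to ['<', '>', '<']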
it = cycle(align) align = [next(it) for _ in range(num_cols)] cols = list(zip(*rows)) col_widths = [max(list(map(len, c))) for c in cols] if headers: col_widths = [max(len(h), cw) for h, cw in zip(headers, col_widths)] row_format = ' '.join(['{:%s%s}' % (align[i], w) for i, w in enumerate(col_widths)]) row_format += '\n' border = row_format.format(*[borderchar * cw for cw in col_widths]) result = border if show_borders else '' if headers: result += row_format.format(*headers) result += border for row in rows: result += row_format.format(*row) if show_borders: result += border return result def format_paragraph(text, width): """ Format the specified text into a column of specified width. The text is assumed to be a single paragraph and existing line breaks will not be preserved. Leading spaces (of the initial line), on the other hand, will be preserved. """ text = re.sub('\n\n*\\s*', ' ', text.strip('\n')) return format_column(text, width) def format_body(text, width): """ Format the specified text into a column of specified width. The text is assumed to be a "body" of one or more paragraphs separated by one or more blank lines. The initial indentation of the first line of each paragraph will be preserved, but any other formatting may be clobbered. """ text = re.sub('\n\\s*\n', '\n\n', text.strip('\n')) # get rid of all-whitespace lines paragraphs = re.split('\n\n+', text) formatted_paragraphs = [] for p in paragraphs: if p.strip() and p.strip()[0] in BULLET_CHARS: formatted_paragraphs.append(format_bullets(p, width)) else: formatted_paragraphs.append(format_paragraph(p, width)) return '\n\n'.join(formatted_paragraphs) def strip_inlined_text(text): """ This function processes multiline inlined text (e.g. from docstrings) to strip away leading spaces and leading and trailing new lines. """ text = text.strip('\n') lines = [ln.rstrip() for ln in text.split('\n')] # first line is special as it may not have the indent that follows the # others, e.g. if it starts on the same line as the multiline quote (""").
nspaces = count_leading_spaces(lines[0]) if len([ln for ln in lines if ln]) > 1: to_strip = min(count_leading_spaces(ln) for ln in lines[1:] if ln) if nspaces >= to_strip: stripped = [lines[0][to_strip:]] else: stripped = [lines[0][nspaces:]] stripped += [ln[to_strip:] for ln in lines[1:]] else: stripped = [lines[0][nspaces:]] return '\n'.join(stripped).strip('\n') def indent(text, spaces=4): """Indent the lines in the specified text by ``spaces`` spaces.""" indented = [] for line in text.split('\n'): if line: indented.append(' ' * spaces + line) else: # do not indent empty lines indented.append(line) return '\n'.join(indented) def format_literal(lit): if isinstance(lit, str): return '``\'{}\'``'.format(lit) elif hasattr(lit, 'pattern'): # regex return '``r\'{}\'``'.format(lit.pattern) elif isinstance(lit, dict): content = indent(',\n'.join("{}: {}".format(key, val) for (key, val) in lit.items())) return '::\n\n{}'.format(indent('{{\n{}\n}}'.format(content))) else: return '``{}``'.format(lit) def get_params_rst(parameters): text = '' for param in parameters: text += '{}: {}\n'.format(param.name, param.mandatory and '(mandatory)' or ' ') text += indent("type: ``'{}'``\n\n".format(get_type_name(param.kind))) desc = strip_inlined_text(param.description or '') text += indent('{}\n'.format(desc)) if param.aliases: text += indent('\naliases: {}\n'.format(', '.join(map(format_literal, param.aliases)))) if param.global_alias: text += indent('\nglobal alias: {}\n'.format(format_literal(param.global_alias))) if param.allowed_values: text += indent('\nallowed values: {}\n'.format(', '.join(map(format_literal, param.allowed_values)))) elif param.constraint: text += indent('\nconstraint: ``{}``\n'.format(get_type_name(param.constraint))) if param.default is not None: value = param.default if isinstance(value, str) and value.startswith(USER_HOME): value = value.replace(USER_HOME, '~') text += indent('\ndefault: {}\n'.format(format_literal(value))) text += '\n' return text def get_aliases_rst(aliases): text = '' for alias in aliases: param_str = ', '.join(['{}={}'.format(n, format_literal(v)) for n, v in alias.params.items()]) text += '{}\n{}\n\n'.format(alias.name, indent(param_str)) return text def underline(text, symbol='='): return '{}\n{}\n\n'.format(text, symbol * len(text)) def line_break(length=10, symbol='-'): """Insert a line break""" return '\n{}\n\n'.format(symbol * length) def get_rst_from_plugin(plugin): text = underline(plugin.name, '-') if hasattr(plugin, 'description'): desc = strip_inlined_text(plugin.description or '') elif plugin.__doc__: desc = strip_inlined_text(plugin.__doc__) else: desc = '' text += desc + '\n\n' aliases_rst = get_aliases_rst(plugin.aliases) if aliases_rst: text += underline('aliases', '~') + aliases_rst params_rst = get_params_rst(plugin.parameters) if params_rst: text += underline('parameters', '~') + params_rst return text + '\n' ================================================ FILE: wa/utils/exec_control.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. # # "environment" management: __environments = {} __active_environment = None def activate_environment(name): """ Sets the current tracking environment to ``name``. If an environment with that name does not already exist, it will be created. """ # pylint: disable=W0603 global __active_environment if name not in list(__environments.keys()): init_environment(name) __active_environment = name def init_environment(name): """ Create a new environment called ``name``, but do not set it as the current environment. :raises: ``ValueError`` if an environment with name ``name`` already exists. """ if name in list(__environments.keys()): msg = "Environment {} already exists".format(name) raise ValueError(msg) __environments[name] = [] def reset_environment(name=None): """ Reset method call tracking for environment ``name``. If ``name`` is not specified or is ``None``, reset the current active environment. :raises: ``ValueError`` if an environment with name ``name`` does not exist. """ if name is not None: if name not in list(__environments.keys()): msg = "Environment {} does not exist".format(name) raise ValueError(msg) __environments[name] = [] else: if __active_environment is None: activate_environment('default') __environments[__active_environment] = [] # The decorators: def once_per_instance(method): """ The specified method will be invoked only once for every bound instance within the environment. """ def wrapper(*args, **kwargs): if __active_environment is None: activate_environment('default') func_id = repr(method.__hash__()) + repr(args[0].__hash__()) if func_id in __environments[__active_environment]: return else: __environments[__active_environment].append(func_id) return method(*args, **kwargs) return wrapper def once_per_class(method): """ The specified method will be invoked only once for all instances of a class within the environment. """ def wrapper(*args, **kwargs): if __active_environment is None: activate_environment('default') func_id = repr(method.__name__) + repr(args[0].__class__) if func_id in __environments[__active_environment]: return else: __environments[__active_environment].append(func_id) return method(*args, **kwargs) return wrapper def once_per_attribute_value(attr_name): """ The specified method will be invoked once for all instances that share the same value for the specified attribute (sameness is established by comparing repr() of the values). """ def wrapped_once_per_attribute_value(method): def wrapper(*args, **kwargs): if __active_environment is None: activate_environment('default') attr_value = getattr(args[0], attr_name) func_id = repr(method.__name__) + repr(args[0].__class__) + repr(attr_value) if func_id in __environments[__active_environment]: return else: __environments[__active_environment].append(func_id) return method(*args, **kwargs) return wrapper return wrapped_once_per_attribute_value def once(method): """ The specified method will be invoked only once within the environment. 
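For example, a setup method decorated with ``@once`` runs on the first call only; subsequent calls within the same environment return ``None`` without invoking the method, until the environment is reset.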
""" def wrapper(*args, **kwargs): if __active_environment is None: activate_environment('default') func_id = repr(method.__code__) if func_id in __environments[__active_environment]: return else: __environments[__active_environment].append(func_id) return method(*args, **kwargs) return wrapper ================================================ FILE: wa/utils/formatter.py ================================================ # Copyright 2013-2017 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from wa.utils.terminalsize import get_terminal_size INDENTATION_FROM_TITLE = 4 class TextFormatter(object): """ This is a base class for text formatting. It mainly ask to implement two methods which are add_item and format_data. The formar will add new text to the formatter, whereas the latter will return a formatted text. The name attribute represents the name of the foramtter. """ name = None data = None def __init__(self): pass def add_item(self, new_data, item_title): """ Add new item to the text formatter. :param new_data: The data to be added :param item_title: A title for the added data """ raise NotImplementedError() def format_data(self): """ It returns a formatted text """ raise NotImplementedError() class DescriptionListFormatter(TextFormatter): name = 'description_list_formatter' data = None def get_text_width(self): if not self._text_width: self._text_width, _ = get_terminal_size() # pylint: disable=unpacking-non-sequence return self._text_width def set_text_width(self, value): self._text_width = value text_width = property(get_text_width, set_text_width) def __init__(self, title=None, width=None): super(DescriptionListFormatter, self).__init__() self.data_title = title self._text_width = width self.longest_word_length = 0 self.data = [] def add_item(self, new_data, item_title): if len(item_title) > self.longest_word_length: self.longest_word_length = len(item_title) self.data[len(self.data):] = [(item_title, self._remove_newlines(new_data))] def format_data(self): parag_indentation = self.longest_word_length + INDENTATION_FROM_TITLE string_formatter = '{}:<{}{} {}'.format('{', parag_indentation, '}', '{}') formatted_data = '' if self.data_title: formatted_data += self.data_title line_width = self.text_width - parag_indentation for title, paragraph in self.data: formatted_data += '\n' title_len = self.longest_word_length - len(title) title += ':' if title_len > 0: title = (' ' * title_len) + title parag_lines = self._break_lines(paragraph, line_width).splitlines() if parag_lines: formatted_data += string_formatter.format(title, parag_lines[0]) for line in parag_lines[1:]: formatted_data += '\n' + string_formatter.format('', line) else: formatted_data += title[:-1] self.text_width = None return formatted_data # Return text's paragraphs sperated in a list, such that each index in the # list is a single text paragraph with no new lines def _remove_newlines(self, new_data): # pylint: disable=R0201 parag_list = [''] parag_num = 0 prv_parag = None # For each paragraph sperated by a new line for 
paragraph in new_data.splitlines(): if paragraph: parag_list[parag_num] += ' ' + paragraph # if the previous line is NOT empty, then add new empty index for # the next paragraph elif prv_parag: parag_num += 1 parag_list.append('') prv_parag = paragraph # sometimes, we end up with an empty string as the last item so we remove it if not parag_list[-1]: return parag_list[:-1] return parag_list def _break_lines(self, parag_list, line_width): # pylint: disable=R0201 formatted_paragraphs = [] for para in parag_list: words = para.split() if words: formatted_text = words.pop(0) current_width = len(formatted_text) # for each word in the paragraph, line width is an accumulation of # word length + 1 (1 is for the space after each word). for word in words: word = word.strip() if current_width + len(word) + 1 >= line_width: formatted_text += '\n' + word current_width = len(word) else: formatted_text += ' ' + word current_width += len(word) + 1 formatted_paragraphs.append(formatted_text) return '\n\n'.join(formatted_paragraphs) ================================================ FILE: wa/utils/log.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=E1101 import logging import logging.handlers import os import string import subprocess import threading from contextlib import contextmanager import colorama from devlib import DevlibError from wa.framework import signal from wa.framework.exception import WAError from wa.utils.misc import get_traceback COLOR_MAP = { logging.DEBUG: colorama.Fore.BLUE, logging.INFO: colorama.Fore.GREEN, logging.WARNING: colorama.Fore.YELLOW, logging.ERROR: colorama.Fore.RED, logging.CRITICAL: colorama.Style.BRIGHT + colorama.Fore.RED, } RESET_COLOR = colorama.Style.RESET_ALL DEFAULT_INIT_BUFFER_CAPACITY = 1000 _indent_level = 0 _indent_width = 4 _console_handler = None _init_handler = None # pylint: disable=global-statement def init(verbosity=logging.INFO, color=True, indent_with=4, regular_fmt='%(levelname)-8s %(message)s', verbose_fmt='%(asctime)s %(levelname)-8s %(name)10.10s: %(message)s', debug=False): global _indent_width, _console_handler, _init_handler _indent_width = indent_with signal.log_error_func = lambda m: log_error(m, signal.logger) root_logger = logging.getLogger() root_logger.setLevel(logging.DEBUG) error_handler = ErrorSignalHandler(logging.DEBUG) root_logger.addHandler(error_handler) _console_handler = logging.StreamHandler() if color: formatter = ColorFormatter else: formatter = LineFormatter if verbosity: _console_handler.setLevel(logging.DEBUG) _console_handler.setFormatter(formatter(verbose_fmt)) else: _console_handler.setLevel(logging.INFO) _console_handler.setFormatter(formatter(regular_fmt)) root_logger.addHandler(_console_handler) buffer_capacity = int(os.getenv('WA_LOG_BUFFER_CAPACITY', str(DEFAULT_INIT_BUFFER_CAPACITY))) _init_handler = InitHandler(buffer_capacity) _init_handler.setLevel(logging.DEBUG) root_logger.addHandler(_init_handler)
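# records buffered by _init_handler are later replayed into the run's log file when add_file() is called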
logging.basicConfig(level=logging.DEBUG) if not debug: logging.raiseExceptions = False logger = logging.getLogger('CGroups') logger.info = logger.debug def set_level(level): _console_handler.setLevel(level) # pylint: disable=global-statement def add_file(filepath, level=logging.DEBUG, fmt='%(asctime)s %(levelname)-8s %(name)10.10s: %(message)s'): global _init_handler root_logger = logging.getLogger() file_handler = logging.FileHandler(filepath) file_handler.setLevel(level) file_handler.setFormatter(LineFormatter(fmt)) if _init_handler: _init_handler.flush_to_target(file_handler) root_logger.removeHandler(_init_handler) _init_handler = None root_logger.addHandler(file_handler) def enable(logs): if isinstance(logs, list): for log in logs: __enable_logger(log) else: __enable_logger(logs) def disable(logs): if isinstance(logs, list): for log in logs: __disable_logger(log) else: __disable_logger(logs) def __enable_logger(logger): if isinstance(logger, str): logger = logging.getLogger(logger) logger.propagate = True def __disable_logger(logger): if isinstance(logger, str): logger = logging.getLogger(logger) logger.propagate = False # pylint: disable=global-statement def indent(): global _indent_level _indent_level += 1 # pylint: disable=global-statement def dedent(): global _indent_level _indent_level -= 1 @contextmanager def indentcontext(): indent() try: yield finally: dedent() # pylint: disable=global-statement def set_indent_level(level): global _indent_level old_level = _indent_level _indent_level = level return old_level def log_error(e, logger, critical=False): """ Log the specified Exception as an error. The Error message will be formatted differently depending on the nature of the exception. :e: the error to log. should be an instance of ``Exception`` :logger: logger to be used. :critical: if ``True``, this error will be logged at ``logging.CRITICAL`` level, otherwise it will be logged as ``logging.ERROR``. """ if getattr(e, 'logged', None): return if critical: log_func = logger.critical else: log_func = logger.error if isinstance(e, KeyboardInterrupt): old_level = set_indent_level(0) logger.info('Got CTRL-C. Aborting.') set_indent_level(old_level) elif isinstance(e, (WAError, DevlibError)): log_func(str(e)) elif isinstance(e, subprocess.CalledProcessError): tb = get_traceback() log_func(tb) command = e.cmd if e.args: command = '{} {}'.format(command, ' '.join(map(str, e.args))) message = 'Command \'{}\' returned non-zero exit status {}\nOUTPUT:\n{}\n' log_func(message.format(command, e.returncode, e.output)) elif isinstance(e, SyntaxError): tb = get_traceback() log_func(tb) message = 'Syntax Error in {}, line {}, offset {}:' log_func(message.format(e.filename, e.lineno, e.offset)) log_func('\t{}'.format(e.msg)) else: tb = get_traceback() log_func(tb) log_func('{}({})'.format(e.__class__.__name__, e)) e.logged = True class ErrorSignalHandler(logging.Handler): """ Emits signals for ERROR and WARNING level traces. """ def emit(self, record): if record.levelno == logging.ERROR: signal.send(signal.ERROR_LOGGED, self, record) elif record.levelno == logging.WARNING: signal.send(signal.WARNING_LOGGED, self, record) class InitHandler(logging.handlers.BufferingHandler): """ Used to buffer early logging records before a log file is created. 
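Buffered records are replayed into the file handler once ``add_file()`` registers it, so early messages are not lost.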
""" def __init__(self, capacity): super(InitHandler, self).__init__(capacity) self.targets = [] def emit(self, record): record.indent_level = _indent_level super(InitHandler, self).emit(record) def flush(self): for target in self.targets: self.flush_to_target(target) self.buffer = [] def add_target(self, target): if target not in self.targets: self.targets.append(target) def flush_to_target(self, target): for record in self.buffer: target.emit(record) class LineFormatter(logging.Formatter): """ Logs each line of the message separately. """ def format(self, record): record.message = record.getMessage() if self.usesTime(): record.asctime = self.formatTime(record, self.datefmt) indent_level = getattr(record, 'indent_level', _indent_level) cur_indent = _indent_width * indent_level d = record.__dict__ parts = [] for line in record.message.split('\n'): line = ' ' * cur_indent + line d.update({'message': line.strip('\r')}) parts.append(self._fmt % d) return '\n'.join(parts) class ColorFormatter(LineFormatter): """ Formats logging records with color and prepends record info to each line of the message. BLUE for DEBUG logging level GREEN for INFO logging level YELLOW for WARNING logging level RED for ERROR logging level BOLD RED for CRITICAL logging level """ def __init__(self, fmt=None, datefmt=None): super(ColorFormatter, self).__init__(fmt, datefmt) template_text = self._fmt.replace('%(message)s', RESET_COLOR + '%(message)s${color}') template_text = '${color}' + template_text + RESET_COLOR self.fmt_template = string.Template(template_text) def format(self, record): self._set_color(COLOR_MAP[record.levelno]) return super(ColorFormatter, self).format(record) def _set_color(self, color): self._fmt = self.fmt_template.substitute(color=color) class BaseLogWriter(object): def __init__(self, name, level=logging.DEBUG): """ File-like object class designed to be used for logging from streams Each complete line (terminated by new line character) gets logged at DEBUG level. In complete lines are buffered until the next new line. :param name: The name of the logger that will be used. """ self.logger = logging.getLogger(name) self.buffer = '' if level == logging.DEBUG: self.do_write = self.logger.debug elif level == logging.INFO: self.do_write = self.logger.info elif level == logging.WARNING: self.do_write = self.logger.warning elif level == logging.ERROR: self.do_write = self.logger.error else: raise Exception('Unknown logging level: {}'.format(level)) def flush(self): # Defined to match the interface expected by pexpect. return self def close(self): if self.buffer: self.logger.debug(self.buffer) self.buffer = '' return self def __del__(self): # Ensure we don't lose bufferd output self.close() class LogWriter(BaseLogWriter): def write(self, data): data = data.replace('\r\n', '\n').replace('\r', '\n') if '\n' in data: parts = data.split('\n') parts[0] = self.buffer + parts[0] for part in parts[:-1]: self.do_write(part) self.buffer = parts[-1] else: self.buffer += data return self class LineLogWriter(BaseLogWriter): def write(self, data): self.do_write(data) class StreamLogger(threading.Thread): """ Logs output from a stream in a thread. 
""" def __init__(self, name, stream, level=logging.DEBUG, klass=LogWriter): super(StreamLogger, self).__init__() self.writer = klass(name, level) self.stream = stream self.daemon = True def run(self): line = self.stream.readline() while line: self.writer.write(line.rstrip('\n')) line = self.stream.readline() self.writer.close() ================================================ FILE: wa/utils/misc.py ================================================ # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Miscellaneous functions that don't fit anywhere else. """ import errno import hashlib import importlib import inspect import logging import math import os import pathlib import random import re import shutil import string import subprocess import sys import traceback import uuid from contextlib import contextmanager from datetime import datetime, timedelta from functools import reduce # pylint: disable=redefined-builtin from operator import mul from tempfile import gettempdir, NamedTemporaryFile from time import sleep from io import StringIO # pylint: disable=wrong-import-position,unused-import from itertools import chain, cycle try: from shutil import which as find_executable except ImportError: from distutils.spawn import find_executable # pylint: disable=no-name-in-module, import-error from dateutil import tz # pylint: disable=wrong-import-order from devlib.exception import TargetError from devlib.utils.misc import (ABI_MAP, check_output, walk_modules, ensure_directory_exists, ensure_file_directory_exists, normalize, convert_new_lines, get_cpu_mask, unique, isiterable, getch, as_relative, ranges_to_list, memoized, list_to_ranges, list_to_mask, mask_to_list, which, to_identifier, safe_extract, LoadSyntaxError) check_output_logger = logging.getLogger('check_output') file_lock_logger = logging.getLogger('file_lock') at_write_logger = logging.getLogger('at_write') # Defined here rather than in wa.exceptions due to module load dependencies def diff_tokens(before_token, after_token): """ Creates a diff of two tokens. If the two tokens are the same it just returns returns the token (whitespace tokens are considered the same irrespective of type/number of whitespace characters in the token). If the tokens are numeric, the difference between the two values is returned. Otherwise, a string in the form [before -> after] is returned. 
""" if before_token.isspace() and after_token.isspace(): return after_token elif before_token.isdigit() and after_token.isdigit(): try: diff = int(after_token) - int(before_token) return str(diff) except ValueError: return "[%s -> %s]" % (before_token, after_token) elif before_token == after_token: return after_token else: return "[%s -> %s]" % (before_token, after_token) def prepare_table_rows(rows): """Given a list of lists, make sure they are prepared to be formatted into a table by making sure each row has the same number of columns and stringifying all values.""" rows = [list(map(str, r)) for r in rows] max_cols = max(list(map(len, rows))) for row in rows: pad = max_cols - len(row) for _ in range(pad): row.append('') return rows def write_table(rows, wfh, align='>', headers=None): # pylint: disable=R0914 """Write a column-aligned table to the specified file object.""" if not rows: return rows = prepare_table_rows(rows) num_cols = len(rows[0]) # cycle specified alignments until we have max_cols of them. This is # consitent with how such cases are handled in R, pandas, etc. it = cycle(align) align = [next(it) for _ in range(num_cols)] cols = list(zip(*rows)) col_widths = [max(list(map(len, c))) for c in cols] if headers: col_widths = [max([c, len(h)]) for c, h in zip(col_widths, headers)] row_format = ' '.join(['{:%s%s}' % (align[i], w) for i, w in enumerate(col_widths)]) row_format += '\n' if headers: wfh.write(row_format.format(*headers)) underlines = ['-' * len(h) for h in headers] wfh.write(row_format.format(*underlines)) for row in rows: wfh.write(row_format.format(*row)) def get_null(): """Returns the correct null sink based on the OS.""" return 'NUL' if os.name == 'nt' else '/dev/null' def get_traceback(exc=None): """ Returns the string with the traceback for the specifiec exc object, or for the current exception exc is not specified. """ if exc is None: exc = sys.exc_info() if not exc: return None tb = exc[2] sio = StringIO() traceback.print_tb(tb, file=sio) del tb # needs to be done explicitly see: http://docs.python.org/2/library/sys.html#sys.exc_info return sio.getvalue() def _check_remove_item(the_list, item): """Helper function for merge_lists that implements checking wether an items should be removed from the list and doing so if needed. Returns ``True`` if the item has been removed and ``False`` otherwise.""" if not isinstance(item, str): return False if not item.startswith('~'): return False actual_item = item[1:] if actual_item in the_list: del the_list[the_list.index(actual_item)] return True VALUE_REGEX = re.compile(r'(\d+(?:\.\d+)?)\s*(\w*)') UNITS_MAP = { 's': 'seconds', 'ms': 'milliseconds', 'us': 'microseconds', 'ns': 'nanoseconds', 'V': 'volts', 'A': 'amps', 'mA': 'milliamps', 'J': 'joules', } def parse_value(value_string): """parses a string representing a numerical value and returns a tuple (value, units), where value will be either int or float, and units will be a string representing the units or None.""" match = VALUE_REGEX.search(value_string) if match: vs = match.group(1) value = float(vs) if '.' 
in vs else int(vs) us = match.group(2) units = UNITS_MAP.get(us, us) return (value, units) else: return (value_string, None) def get_meansd(values): """Returns mean and standard deviation of the specified values.""" if not values: return float('nan'), float('nan') mean = sum(values) / len(values) sd = math.sqrt(sum([(v - mean) ** 2 for v in values]) / len(values)) return mean, sd def geomean(values): """Returns the geometric mean of the values.""" return reduce(mul, values) ** (1.0 / len(values)) def capitalize(text): """Capitalises the specified text: first letter upper case, all subsequent letters lower case.""" if not text: return '' return text[0].upper() + text[1:].lower() def utc_to_local(dt): """Convert naive datetime to local time zone, assuming UTC.""" return dt.replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal()) def local_to_utc(dt): """Convert naive datetime to UTC, assuming local time zone.""" return dt.replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc()) def load_class(classpath): """Loads the specified Python class. ``classpath`` must be a fully-qualified class name (i.e. namspaced under module/package).""" modname, clsname = classpath.rsplit('.', 1) mod = importlib.import_module(modname) cls = getattr(mod, clsname) if isinstance(cls, type): return cls else: raise ValueError(f'The classpath "{classpath}" does not point at a class: {cls}') def get_pager(): """Returns the name of the system pager program.""" pager = os.getenv('PAGER') if pager is None: pager = find_executable('less') if pager is None: pager = find_executable('more') return pager _bash_color_regex = re.compile('\x1b\[[0-9;]+m') def strip_bash_colors(text): return _bash_color_regex.sub('', text) def format_duration(seconds, sep=' ', order=['day', 'hour', 'minute', 'second']): # pylint: disable=dangerous-default-value """ Formats the specified number of seconds into human-readable duration. """ if isinstance(seconds, timedelta): td = seconds else: td = timedelta(seconds=seconds or 0) dt = datetime(1, 1, 1) + td result = [] for item in order: value = getattr(dt, item, None) if item == 'day': value -= 1 if not value: continue suffix = '' if value == 1 else 's' result.append('{} {}{}'.format(value, item, suffix)) return sep.join(result) if result else 'N/A' def get_article(word): """ Returns the appropriate indefinite article for the word (ish). .. note:: Indefinite article assignment in English is based on sound rather than spelling, so this will not work correctly in all case; e.g. this will return ``"a hour"``. """ return 'an' if word[0] in 'aoeiu' else 'a' def get_random_string(length): """Returns a random ASCII string of the specified length).""" return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length)) def import_path(filepath, module_name=None): """ Programmatically import the given Python source file under the name ``module_name``. If ``module_name`` is not provided, a stable name based on ``filepath`` will be created. Note that this module name cannot be relied on, so don't make write import statements assuming this will be stable in the future. 
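
# Runnable spot-checks for the value helpers above (inputs are arbitrary
# examples):

assert parse_value('100 ms') == (100, 'milliseconds')
assert parse_value('3.3 V') == (3.3, 'volts')
assert get_meansd([1, 2, 3]) == (2.0, math.sqrt(2.0 / 3))
assert format_duration(3661) == '1 hour 1 minute 1 second'
assert get_article('apple') == 'an'  # sound-based cases like 'hour' are not handled
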
""" if not module_name: path = pathlib.Path(filepath).resolve() id_ = to_identifier(str(path)) module_name = f'wa._user_import.{id_}' try: return sys.modules[module_name] except KeyError: spec = importlib.util.spec_from_file_location(module_name, filepath) module = importlib.util.module_from_spec(spec) try: sys.modules[module_name] = module spec.loader.exec_module(module) except BaseException: sys.modules.pop(module_name, None) raise else: # We could return the "module" object, but that would not take into # account any manipulation the module did on sys.modules when # executing. To be consistent with the import statement, re-lookup # the module name. return sys.modules[module_name] def load_struct_from_python(filepath): """Parses a config structure from a .py file. The structure should be composed of basic Python types (strings, ints, lists, dicts, etc.).""" try: mod = import_path(filepath) except SyntaxError as e: raise LoadSyntaxError(e.message, filepath, e.lineno) else: return { k: v for k, v in inspect.getmembers(mod) if not k.startswith('_') } def open_file(filepath): """ Open the specified file path with the associated launcher in an OS-agnostic way. """ if os.name == 'nt': # Windows return os.startfile(filepath) # pylint: disable=no-member elif sys.platform == 'darwin': # Mac OSX return subprocess.call(['open', filepath]) else: # assume Linux or similar running a freedesktop-compliant GUI return subprocess.call(['xdg-open', filepath]) def sha256(path, chunk=2048): """Calculates SHA256 hexdigest of the file at the specified path.""" h = hashlib.sha256() with open(path, 'rb') as fh: buf = fh.read(chunk) while buf: h.update(buf) buf = fh.read(chunk) return h.hexdigest() def urljoin(*parts): return '/'.join(p.rstrip('/') for p in parts) # From: http://eli.thegreenplace.net/2011/10/19/perls-guess-if-file-is-text-or-binary-implemented-in-python/ def istextfile(fileobj, blocksize=512): """ Uses heuristics to guess whether the given file is text or binary, by reading a single block of bytes from the file. If more than 30% of the chars in the block are non-text, or there are NUL ('\x00') bytes in the block, assume this is a binary file. """ _text_characters = (b''.join(chr(i) for i in range(32, 127)) + b'\n\r\t\f\b') block = fileobj.read(blocksize) if b'\x00' in block: # Files with null bytes are binary return False elif not block: # An empty file is considered a valid text file return True # Use translate's 'deletechars' argument to efficiently remove all # occurrences of _text_characters from the block nontext = block.translate(None, _text_characters) return float(len(nontext)) / len(block) <= 0.30 def categorize(v): if hasattr(v, 'merge_with') and hasattr(v, 'merge_into'): return 'o' elif hasattr(v, 'items'): return 'm' elif isiterable(v): return 's' elif v is None: return 'n' else: return 'c' # pylint: disable=too-many-return-statements,too-many-branches def merge_config_values(base, other): """ This is used to merge two objects, typically when setting the value of a ``ConfigurationPoint``. First, both objects are categorized into c: A scalar value. Basically, most objects. These values are treated as atomic, and not mergeable. s: A sequence. Anything iterable that is not a dict or a string (strings are considered scalars). m: A key-value mapping. ``dict`` and its derivatives. n: ``None``. o: A mergeable object; this is an object that implements both ``merge_with`` and ``merge_into`` methods. 
The merge rules based on the two categories are then as follows: (c1, c2) --> c2 (s1, s2) --> s1 . s2 (m1, m2) --> m1 . m2 (c, s) --> [c] . s (s, c) --> s . [c] (s, m) --> s . [m] (m, s) --> [m] . s (m, c) --> ERROR (c, m) --> ERROR (o, X) --> o.merge_with(X) (X, o) --> o.merge_into(X) (X, n) --> X (n, X) --> X where: '.' means concatenation (for maps, contcationation of (k, v) streams then converted back into a map). If the types of the two objects differ, the type of ``other`` is used for the result. 'X' means "any category" '[]' used to indicate a literal sequence (not necessarily a ``list``). when this is concatenated with an actual sequence, that sequencies type is used. notes: - When a mapping is combined with a sequence, that mapping is treated as a scalar value. - When combining two mergeable objects, they're combined using ``o1.merge_with(o2)`` (_not_ using o2.merge_into(o1)). - Combining anything with ``None`` yields that value, irrespective of the order. So a ``None`` value is eqivalent to the corresponding item being omitted. - When both values are scalars, merging is equivalent to overwriting. - There is no recursion (e.g. if map values are lists, they will not be merged; ``other`` will overwrite ``base`` values). If complicated merging semantics (such as recursion) are required, they should be implemented within custom mergeable types (i.e. those that implement ``merge_with`` and ``merge_into``). While this can be used as a generic "combine any two arbitry objects" function, the semantics have been selected specifically for merging configuration point values. """ cat_base = categorize(base) cat_other = categorize(other) if cat_base == 'n': return other elif cat_other == 'n': return base if cat_base == 'o': return base.merge_with(other) elif cat_other == 'o': return other.merge_into(base) if cat_base == 'm': if cat_other == 's': return merge_sequencies([base], other) elif cat_other == 'm': return merge_maps(base, other) else: message = 'merge error ({}, {}): "{}" and "{}"' raise ValueError(message.format(cat_base, cat_other, base, other)) elif cat_base == 's': if cat_other == 's': return merge_sequencies(base, other) else: return merge_sequencies(base, [other]) else: # cat_base == 'c' if cat_other == 's': return merge_sequencies([base], other) elif cat_other == 'm': message = 'merge error ({}, {}): "{}" and "{}"' raise ValueError(message.format(cat_base, cat_other, base, other)) else: return other def merge_sequencies(s1, s2): return type(s2)(unique(chain(s1, s2))) def merge_maps(m1, m2): return type(m2)(chain(iter(m1.items()), iter(m2.items()))) def merge_dicts_simple(base, other): result = base.copy() for key, value in (other or {}).items(): result[key] = merge_config_values(result.get(key), value) return result def touch(path): with open(path, 'w'): pass def get_object_name(obj): if hasattr(obj, 'name'): return obj.name elif hasattr(obj, '__func__') and hasattr(obj, '__self__'): return '{}.{}'.format(get_object_name(obj.__self__.__class__), obj.__func__.__name__) elif hasattr(obj, 'func_name'): return obj.__name__ elif hasattr(obj, '__name__'): return obj.__name__ elif hasattr(obj, '__class__'): return obj.__class__.__name__ return None def resolve_cpus(name, target): """ Returns a list of cpu numbers that corresponds to a passed name. Allowed formats are: - 'big' - 'little' - ' e.g. 
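
# Runnable illustration of the merge rules tabulated above (example values):

assert merge_config_values(1, 2) == 2                    # (c1, c2) --> c2
assert merge_config_values([1, 2], [2, 3]) == [1, 2, 3]  # (s1, s2) --> s1 . s2
assert merge_config_values('x', ['y']) == ['x', 'y']     # (c, s)  --> [c] . s
assert merge_config_values({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}  # (m1, m2)
assert merge_config_values(None, 'x') == 'x'             # (n, X)  --> X
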
'A15' - 'cpuX' - 'all' - returns all cpus - '' - Empty name will also return all cpus """ cpu_list = list(range(target.number_of_cpus)) # Support for passing cpu no directly if isinstance(name, int): cpu = name if cpu not in cpu_list: message = 'CPU{} is not available, must be in {}' raise ValueError(message.format(cpu, cpu_list)) return [cpu] # Apply to all cpus if not name or name.lower() == 'all': return cpu_list # Deal with big.little substitution elif name.lower() == 'big': name = target.big_core if not name: raise ValueError('big core name could not be retrieved') elif name.lower() == 'little': name = target.little_core if not name: raise ValueError('little core name could not be retrieved') # Return all cores with specified name if name in target.core_names: return target.core_cpus(name) # Check if core number has been supplied. else: core_no = re.match('cpu([0-9]+)', name, re.IGNORECASE) if core_no: cpu = int(core_no.group(1)) if cpu not in cpu_list: message = 'CPU{} is not available, must be in {}' raise ValueError(message.format(cpu, cpu_list)) return [cpu] else: msg = 'Unexpected core name "{}"' raise ValueError(msg.format(name)) @memoized def resolve_unique_domain_cpus(name, target): """ Same as `resolve_cpus` above but only returns only the first cpu in each of the different frequency domains. Requires cpufreq. """ cpus = resolve_cpus(name, target) if not target.has('cpufreq'): msg = 'Device does not appear to support cpufreq; ' \ 'Cannot obtain cpu domain information' raise TargetError(msg) unique_cpus = [] domain_cpus = [] for cpu in cpus: if cpu not in domain_cpus: domain_cpus = target.cpufreq.get_related_cpus(cpu) if domain_cpus[0] not in unique_cpus: unique_cpus.append(domain_cpus[0]) return unique_cpus def format_ordered_dict(od): """ Provide a string representation of ordered dict that is similar to the regular dict representation, as that is more concise and easier to read than the default __str__ for OrderedDict. """ return '{{{}}}'.format(', '.join('{}={}'.format(k, v) for k, v in od.items())) @contextmanager def atomic_write_path(path, mode='w'): """ Gets a file path to write to which will be replaced with the original file path to simulate an atomic write from the point of view of other processes. This is achieved by writing to a tmp file and replacing the exiting file to prevent inconsistencies. """ tmp_file = None try: tmp_file = NamedTemporaryFile(mode=mode, delete=False, suffix=os.path.basename(path)) at_write_logger.debug('') yield tmp_file.name os.fsync(tmp_file.file.fileno()) finally: if tmp_file: tmp_file.close() at_write_logger.debug('Moving {} to {}'.format(tmp_file.name, path)) safe_move(tmp_file.name, path) def safe_move(src, dst): """ Taken from: https://alexwlchan.net/2019/03/atomic-cross-filesystem-moves-in-python/ Rename a file from ``src`` to ``dst``. * Moves must be atomic. ``shutil.move()`` is not atomic. * Moves must work across filesystems and ``os.rename()`` can throw errors if run across filesystems. So we try ``os.rename()``, but if we detect a cross-filesystem copy, we switch to ``shutil.move()`` with some wrappers to make it atomic. """ try: os.rename(src, dst) except OSError as err: if err.errno == errno.EXDEV: # Generate a unique ID, and copy `` to the target directory # with a temporary name `..tmp`. Because we're copying # across a filesystem boundary, this initial copy may not be # atomic. We intersperse a random UUID so if different processes # are copying into ``, they don't overlap in their tmp copies. 
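
# Sketch of the write-then-rename pattern provided by atomic_write_path()
# above ('results.csv' is an example path): other processes observe either
# the old file or the complete new one, never a partial write.

with atomic_write_path('results.csv') as tmp_path:
    with open(tmp_path, 'w') as wfh:
        wfh.write('metric,value\nscore,42\n')
# 'results.csv' has now been replaced atomically
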
copy_id = uuid.uuid4() tmp_dst = "%s.%s.tmp" % (dst, copy_id) shutil.copyfile(src, tmp_dst) # Then do an atomic rename onto the new name, and clean up the # source image. os.rename(tmp_dst, dst) os.unlink(src) else: raise @contextmanager def lock_file(path, timeout=30): """ Enable automatic locking and unlocking of a file path given. Used to prevent synchronisation issues between multiple wa processes. Uses a default timeout of 30 seconds which should be overridden for files that are expect to be unavailable for longer periods of time. """ # Import here to avoid circular imports # pylint: disable=wrong-import-position,cyclic-import, import-outside-toplevel from wa.framework.exception import ResourceError locked = False l_file = 'wa-{}.lock'.format(path) l_file = os.path.join(gettempdir(), l_file.replace(os.path.sep, '_')) file_lock_logger.debug('Acquiring lock on "{}"'.format(path)) try: while timeout: try: open(l_file, 'x').close() locked = True file_lock_logger.debug('Lock acquired on "{}"'.format(path)) break except FileExistsError: msg = 'Failed to acquire lock on "{}" Retrying...' file_lock_logger.debug(msg.format(l_file)) sleep(1) timeout -= 1 else: msg = 'Failed to acquire lock file "{}" within the timeout. \n' \ 'If there are no other running WA processes please delete ' \ 'this file and retry.' raise ResourceError(msg.format(os.path.abspath(l_file))) yield finally: if locked and os.path.exists(l_file): os.remove(l_file) file_lock_logger.debug('Lock released "{}"'.format(path)) ================================================ FILE: wa/utils/postgres.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module contains additional casting and adaptation functions for several different datatypes and metadata types for use with the psycopg2 module. The casting functions will transform Postgresql data types into Python objects, and the adapters the reverse. They are named this way according to the psycopg2 conventions. 
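
# Sketch of guarding a shared path against concurrent WA processes with
# lock_file() above; the path is an example, and the lock file itself is
# created in the system temp directory.

with lock_file('/tmp/wa-shared-cache'):
    pass  # at most one process at a time is inside this block for this path
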
For more information about the available adapters and casters in the standard
psycopg2 module, please see:

http://initd.org/psycopg/docs/extensions.html#sql-adaptation-protocol-objects
"""

import re
import os

try:
    from psycopg2 import InterfaceError
    from psycopg2.extensions import AsIs
except ImportError:
    InterfaceError = None
    AsIs = None

from wa.utils.types import level


POSTGRES_SCHEMA_DIR = os.path.join(os.path.dirname(__file__),
                                   '..',
                                   'commands',
                                   'postgres_schemas')


def cast_level(value, cur):  # pylint: disable=unused-argument
    """Generic Level caster for psycopg2"""
    if not InterfaceError:
        raise ImportError('There was a problem importing psycopg2.')
    if value is None:
        return None
    m = re.match(r"([^\()]*)\((\d*)\)", value)
    # only dereference the match groups after checking there was a match
    if m:
        return level(str(m.group(1)), int(m.group(2)))
    else:
        raise InterfaceError("Bad level representation: {}".format(value))


def cast_vanilla(value, cur):  # pylint: disable=unused-argument
    """Vanilla Type caster for psycopg2

    Simply returns the string representation.
    """
    if value is None:
        return None
    else:
        return str(value)


# List functions and classes for adapting


def adapt_level(a_level):
    """Generic Level Adapter for psycopg2"""
    return "{}({})".format(a_level.name, a_level.value)


class ListOfLevel(object):
    value = None

    def __init__(self, a_level):
        self.value = a_level

    def return_original(self):
        return self.value


def adapt_ListOfX(adapt_X):
    """This will create a multi-column adapter for a particular type.
    Note that the type must itself be in array form. Therefore this function
    serves to separate out individual lists into multiple big lists.
    E.g. if the X adapter produces the array (a,b,c), then this adapter will
    take a list of Xs and produce a master array:
    ((a1,a2,a3),(b1,b2,b3),(c1,c2,c3))
    Takes as its argument the adapter for the type, which must produce an SQL
    array string. Note that you should NOT put the AsIs in the adapt_X
    function.
    The need for this function arises from the fact that we may want to
    handle list-creating types differently if they themselves are in a list,
    as in the example above; we cannot simply adopt a recursive strategy.
    Note that master_list is the list representing the array. Each element in
    the list will represent a subarray (column). If there is only one
    subarray following processing, then the outer {} are stripped to give a
    1-dimensional array.
""" def adapter_function(param): if not AsIs: raise ImportError('There was a problem importing psycopg2.') param = param.value result_list = [] for element in param: # Where param will be a list of X's result_list.append(adapt_X(element)) test_element = result_list[0] num_items = len(test_element.split(",")) master_list = [] for x in range(num_items): master_list.append("") for element in result_list: element = element.strip("{").strip("}") element = element.split(",") for x in range(num_items): master_list[x] = master_list[x] + element[x] + "," if num_items > 1: master_sql_string = "{" else: master_sql_string = "" for x in range(num_items): # Remove trailing comma master_list[x] = master_list[x].strip(",") master_list[x] = "{" + master_list[x] + "}" master_sql_string = master_sql_string + master_list[x] + "," master_sql_string = master_sql_string.strip(",") if num_items > 1: master_sql_string = master_sql_string + "}" return AsIs("'{}'".format(master_sql_string)) return adapter_function def return_as_is(adapt_X): """Returns the AsIs appended function of the function passed This is useful for adapter functions intended to be used with the adapt_ListOfX function, which must return strings, as it allows them to be standalone adapters. """ if not AsIs: raise ImportError('There was a problem importing psycopg2.') def adapter_function(param): return AsIs("'{}'".format(adapt_X(param))) return adapter_function def adapt_vanilla(param): """Vanilla adapter: simply returns the string representation""" if not AsIs: raise ImportError('There was a problem importing psycopg2.') return AsIs("'{}'".format(param)) def create_iterable_adapter(array_columns, explicit_iterate=False): """Create an iterable adapter of a specified dimension If explicit_iterate is True, then it will be assumed that the param needs to be iterated upon via param.iteritems(). Otherwise it will simply be iterated vanilla. The value of array_columns will be equal to the number of indexed elements per item in the param iterable. E.g. a list of 3-element-long lists has 3 elements per item in the iterable (the master list) and therefore array_columns should be equal to 3. If array_columns is 0, then this indicates that the iterable contains single items. 
""" if not AsIs: raise ImportError('There was a problem importing psycopg2.') def adapt_iterable(param): """Adapts an iterable object into an SQL array""" final_string = "" # String stores a string representation of the array if param: if array_columns > 1: for index in range(array_columns): array_string = "" for item in param.iteritems(): array_string = array_string + str(item[index]) + "," array_string = array_string.strip(",") array_string = "{" + array_string + "}" final_string = final_string + array_string + "," final_string = final_string.strip(",") else: # Simply return each item in the array if explicit_iterate: for item in param.iteritems(): final_string = final_string + str(item) + "," else: for item in param: final_string = final_string + str(item) + "," return AsIs("'{{{}}}'".format(final_string)) return adapt_iterable # For reference only and future use def adapt_list(param): """Adapts a list into an array""" if not AsIs: raise ImportError('There was a problem importing psycopg2.') final_string = "" if param: for item in param: final_string = final_string + str(item) + "," final_string = "{" + final_string + "}" return AsIs("'{}'".format(final_string)) def get_schema(schemafilepath): with open(schemafilepath, 'r') as sqlfile: sql_commands = sqlfile.read() schema_major = None schema_minor = None # Extract schema version if present if sql_commands.startswith('--!VERSION'): splitcommands = sql_commands.split('!ENDVERSION!\n') schema_major, schema_minor = splitcommands[0].strip('--!VERSION!').split('.') schema_major = int(schema_major) schema_minor = int(schema_minor) sql_commands = splitcommands[1] return schema_major, schema_minor, sql_commands def get_database_schema_version(conn): with conn.cursor() as cursor: cursor.execute('''SELECT DatabaseMeta.schema_major, DatabaseMeta.schema_minor FROM DatabaseMeta;''') schema_major, schema_minor = cursor.fetchone() return (schema_major, schema_minor) def get_schema_versions(conn): schemafilepath = os.path.join(POSTGRES_SCHEMA_DIR, 'postgres_schema.sql') cur_major_version, cur_minor_version, _ = get_schema(schemafilepath) db_schema_version = get_database_schema_version(conn) return (cur_major_version, cur_minor_version), db_schema_version ================================================ FILE: wa/utils/revent.py ================================================ # Copyright 2016-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import logging import os import struct import signal from datetime import datetime from collections import namedtuple from devlib.utils.misc import memoized from wa.framework.resource import Executable, NO_ONE, ResourceResolver from wa.utils.exec_control import once_per_class GENERAL_MODE = 0 GAMEPAD_MODE = 1 u16_struct = struct.Struct('= self.version >= 2: self.mode, = read_struct(fh, header_two_struct) if self.mode == GENERAL_MODE: self._read_devices(fh) elif self.mode == GAMEPAD_MODE: self._read_gamepad_info(fh) else: raise ValueError('Unexpected recording mode: {}'.format(self.mode)) self.num_events, = read_struct(fh, u64_struct) if self.version > 2: ts_sec = read_struct(fh, u64_struct)[0] ts_usec = read_struct(fh, u64_struct)[0] self.start_time = datetime.fromtimestamp(ts_sec + float(ts_usec) / 1000000) ts_sec = read_struct(fh, u64_struct)[0] ts_usec = read_struct(fh, u64_struct)[0] self.end_time = datetime.fromtimestamp(ts_sec + float(ts_usec) / 1000000) elif 2 > self.version >= 0: self.mode = GENERAL_MODE self._read_devices(fh) else: raise ValueError('Invalid recording version: {}'.format(self.version)) def _read_devices(self, fh): num_devices, = read_struct(fh, u32_struct) for _ in range(num_devices): self.device_paths.append(read_string(fh)) def _read_gamepad_info(self, fh): self.gamepad_device = UinputDeviceInfo(fh) self.device_paths.append('[GAMEPAD]') def _iter_events(self): if self.fh is None: msg = 'Attempting to iterate over events of a closed recording' raise RuntimeError(msg) self.fh.seek(self._events_start) if self.version >= 2: for _ in range(self.num_events): yield ReventEvent(self.fh) else: file_size = os.path.getsize(self.filepath) while self.fh.tell() < file_size: yield ReventEvent(self.fh, legacy=True) def __iter__(self): for event in self.events: yield event def __enter__(self): return self def __exit__(self, *args): self.close() def __del__(self): self.close() def get_revent_binary(abi): resolver = ResourceResolver() resolver.load() resource = Executable(NO_ONE, abi, 'revent') return resolver.get(resource) class ReventRecorder(object): # Share location of target executable across all instances target_executable = None def __init__(self, target): self.logger = logging.getLogger(self.__class__.__name__) self.target = target if not ReventRecorder.target_executable: ReventRecorder.target_executable = self._get_target_path(self.target) @once_per_class def deploy(self): if not ReventRecorder.target_executable: ReventRecorder.target_executable = self.target.get_installed('revent') host_executable = get_revent_binary(self.target.abi) ReventRecorder.target_executable = self.target.install(host_executable) @once_per_class def remove(self): if ReventRecorder.target_executable: self.target.uninstall('revent') def start_record(self, revent_file): command = f'{ReventRecorder.target_executable} record -s {revent_file}' self.logger.debug('Executing record command "%s"...', command) self.target.kick_off(command, self.target.is_rooted) def stop_record(self): self.target.killall('revent', signal.SIGINT, as_root=self.target.is_rooted) def replay(self, revent_file, timeout=None): self.target.killall('revent') command = f'{ReventRecorder.target_executable} replay {revent_file}' self.logger.debug('Executing replay command "%s" with %d seconds timeout...', command, timeout) self.target.execute(command, timeout=timeout) @memoized @staticmethod def _get_target_path(target): return target.get_installed('revent') ================================================ FILE: wa/utils/serializer.py 
================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This module contains wrappers for Python serialization modules for common formats that make it easier to serialize/deserialize WA Plain Old Data structures (serilizable WA classes implement ``to_pod()``/``from_pod()`` methods for converting between POD structures and Python class instances). The modifications to standard serilization procedures are: - mappings are deserialized as ``OrderedDict``\ 's rather than standard Python ``dict``\ 's. This allows for cleaner syntax in certain parts of WA configuration (e.g. values to be written to files can be specified as a dict, and they will be written in the order specified in the config). - regular expressions are automatically encoded/decoded. This allows for configuration values to be transparently specified as strings or regexes in the POD config. This module exports the "wrapped" versions of serialization libraries, and this should be imported and used instead of importing the libraries directly. i.e. :: from wa.utils.serializer import yaml pod = yaml.load(fh) instead of :: import yaml pod = yaml.load(fh) It's also possible to use the serializer directly:: from wa.utils import serializer pod = serializer.load(fh) This can also be used to ``dump()`` POD structures. By default, ``dump()`` will produce JSON, but ``fmt`` parameter may be used to specify an alternative format (``yaml`` or ``python``). ``load()`` will use the file plugin to guess the format, but ``fmt`` may also be used to specify it explicitly. 
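
# Runnable sketch of the wrapped JSON serializer described above: WA-specific
# types (here, a compiled regex) survive a round-trip, and mappings are
# loaded back as OrderedDict.

import re
from io import StringIO
from wa.utils.serializer import json

buf = StringIO()
json.dump({'pattern': re.compile('wa-[0-9]+'), 'count': 1}, buf)
buf.seek(0)
pod = json.load(buf)
assert pod['pattern'].pattern == 'wa-[0-9]+'  # decoded back into a regex
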
""" # pylint: disable=unused-argument import os import re import json as _json from collections import OrderedDict from collections.abc import Hashable from datetime import datetime import dateutil.parser import yaml as _yaml # pylint: disable=wrong-import-order from yaml import MappingNode try: from yaml import FullLoader as _yaml_loader except ImportError: from yaml import Loader as _yaml_loader from yaml.constructor import ConstructorError # pylint: disable=redefined-builtin from past.builtins import basestring # pylint: disable=wrong-import-order from wa.framework.exception import SerializerSyntaxError from wa.utils.misc import isiterable from wa.utils.types import regex_type, none_type, level, cpu_mask __all__ = [ 'json', 'yaml', 'read_pod', 'dump', 'load', 'is_pod', 'POD_TYPES', ] POD_TYPES = [ list, tuple, dict, set, basestring, str, int, float, bool, OrderedDict, datetime, regex_type, none_type, level, cpu_mask, ] class WAJSONEncoder(_json.JSONEncoder): def default(self, obj): # pylint: disable=method-hidden,arguments-differ if isinstance(obj, regex_type): return 'REGEX:{}:{}'.format(obj.flags, obj.pattern) elif isinstance(obj, datetime): return 'DATET:{}'.format(obj.isoformat()) elif isinstance(obj, level): return 'LEVEL:{}:{}'.format(obj.name, obj.value) elif isinstance(obj, cpu_mask): return 'CPUMASK:{}'.format(obj.mask()) else: return _json.JSONEncoder.default(self, obj) class WAJSONDecoder(_json.JSONDecoder): def decode(self, s, **kwargs): # pylint: disable=arguments-differ d = _json.JSONDecoder.decode(self, s, **kwargs) def try_parse_object(v): if isinstance(v, basestring): if v.startswith('REGEX:'): _, flags, pattern = v.split(':', 2) return re.compile(pattern, int(flags or 0)) elif v.startswith('DATET:'): _, pattern = v.split(':', 1) return dateutil.parser.parse(pattern) elif v.startswith('LEVEL:'): _, name, value = v.split(':', 2) return level(name, value) elif v.startswith('CPUMASK:'): _, value = v.split(':', 1) return cpu_mask(value) return v def load_objects(d): if not hasattr(d, 'items'): return d pairs = [] for k, v in d.items(): if hasattr(v, 'items'): pairs.append((k, load_objects(v))) elif isiterable(v): pairs.append((k, [try_parse_object(i) for i in v])) else: pairs.append((k, try_parse_object(v))) return OrderedDict(pairs) return load_objects(d) class json(object): @staticmethod def dump(o, wfh, indent=4, *args, **kwargs): return _json.dump(o, wfh, cls=WAJSONEncoder, indent=indent, *args, **kwargs) @staticmethod def dumps(o, indent=4, *args, **kwargs): return _json.dumps(o, cls=WAJSONEncoder, indent=indent, *args, **kwargs) @staticmethod def load(fh, *args, **kwargs): try: return _json.load(fh, cls=WAJSONDecoder, object_pairs_hook=OrderedDict, *args, **kwargs) except ValueError as e: raise SerializerSyntaxError(e.args[0]) @staticmethod def loads(s, *args, **kwargs): try: return _json.loads(s, cls=WAJSONDecoder, object_pairs_hook=OrderedDict, *args, **kwargs) except ValueError as e: raise SerializerSyntaxError(e.args[0]) _mapping_tag = _yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG _regex_tag = 'tag:wa:regex' _level_tag = 'tag:wa:level' _cpu_mask_tag = 'tag:wa:cpu_mask' def _wa_dict_representer(dumper, data): return dumper.represent_mapping(_mapping_tag, iter(data.items())) def _wa_regex_representer(dumper, data): text = '{}:{}'.format(data.flags, data.pattern) return dumper.represent_scalar(_regex_tag, text) def _wa_level_representer(dumper, data): text = '{}:{}'.format(data.name, data.level) return dumper.represent_scalar(_level_tag, text) def 
_wa_cpu_mask_representer(dumper, data): return dumper.represent_scalar(_cpu_mask_tag, data.mask()) def _wa_regex_constructor(loader, node): value = loader.construct_scalar(node) flags, pattern = value.split(':', 1) return re.compile(pattern, int(flags or 0)) def _wa_level_constructor(loader, node): value = loader.construct_scalar(node) name, value = value.split(':', 1) return level(name, value) def _wa_cpu_mask_constructor(loader, node): value = loader.construct_scalar(node) return cpu_mask(value) class _WaYamlLoader(_yaml_loader): # pylint: disable=too-many-ancestors def construct_mapping(self, node, deep=False): if isinstance(node, MappingNode): self.flatten_mapping(node) if not isinstance(node, MappingNode): raise ConstructorError(None, None, "expected a mapping node, but found %s" % node.id, node.start_mark) mapping = OrderedDict() for key_node, value_node in node.value: key = self.construct_object(key_node, deep=deep) if not isinstance(key, Hashable): raise ConstructorError("while constructing a mapping", node.start_mark, "found unhashable key", key_node.start_mark) value = self.construct_object(value_node, deep=deep) mapping[key] = value return mapping _yaml.add_representer(OrderedDict, _wa_dict_representer) _yaml.add_representer(regex_type, _wa_regex_representer) _yaml.add_representer(level, _wa_level_representer) _yaml.add_representer(cpu_mask, _wa_cpu_mask_representer) _yaml.add_constructor(_regex_tag, _wa_regex_constructor, Loader=_WaYamlLoader) _yaml.add_constructor(_level_tag, _wa_level_constructor, Loader=_WaYamlLoader) _yaml.add_constructor(_cpu_mask_tag, _wa_cpu_mask_constructor, Loader=_WaYamlLoader) _yaml.add_constructor(_mapping_tag, _WaYamlLoader.construct_yaml_map, Loader=_WaYamlLoader) class yaml(object): @staticmethod def dump(o, wfh, *args, **kwargs): return _yaml.dump(o, wfh, *args, **kwargs) @staticmethod def load(fh, *args, **kwargs): try: return _yaml.load(fh, *args, Loader=_WaYamlLoader, **kwargs) except _yaml.YAMLError as e: lineno = None if hasattr(e, 'problem_mark'): lineno = e.problem_mark.line # pylint: disable=no-member message = e.args[0] if (e.args and e.args[0]) else str(e) raise SerializerSyntaxError(message, lineno) loads = load class python(object): @staticmethod def dump(o, wfh, *args, **kwargs): raise NotImplementedError() @classmethod def load(cls, fh, *args, **kwargs): return cls.loads(fh.read()) @staticmethod def loads(s, *args, **kwargs): pod = {} try: exec(s, pod) # pylint: disable=exec-used except SyntaxError as e: raise SerializerSyntaxError(e.message, e.lineno) for k in list(pod.keys()): # pylint: disable=consider-iterating-dictionary if k.startswith('__'): del pod[k] return pod def read_pod(source, fmt=None): if isinstance(source, str): with open(source) as fh: return _read_pod(fh, fmt) elif hasattr(source, 'read') and (hasattr(source, 'name') or fmt): return _read_pod(source, fmt) else: message = 'source must be a path or an open file handle; got {}' raise ValueError(message.format(type(source))) def write_pod(pod, dest, fmt=None): if isinstance(dest, str): with open(dest, 'w') as wfh: return _write_pod(pod, wfh, fmt) elif hasattr(dest, 'write') and (hasattr(dest, 'name') or fmt): return _write_pod(pod, dest, fmt) else: message = 'dest must be a path or an open file handle; got {}' raise ValueError(message.format(type(dest))) def dump(o, wfh, fmt='json', *args, **kwargs): serializer = {'yaml': yaml, 'json': json, 'python': python, 'py': python, }.get(fmt) if serializer is None: raise ValueError('Unknown serialization format: 
"{}"'.format(fmt)) serializer.dump(o, wfh, *args, **kwargs) def load(s, fmt='json', *args, **kwargs): return read_pod(s, fmt=fmt) def _read_pod(fh, fmt=None): if fmt is None: fmt = os.path.splitext(fh.name)[1].lower().strip('.') if fmt == '': # Special case of no given file extension message = ("Could not determine format " "from file extension for \"{}\". " "Please specify it or modify the fmt parameter.") raise ValueError(message.format(getattr(fh, 'name', ''))) if fmt == 'yaml': return yaml.load(fh) elif fmt == 'json': return json.load(fh) elif fmt == 'py': return python.load(fh) else: raise ValueError('Unknown format "{}": {}'.format(fmt, getattr(fh, 'name', ''))) def _write_pod(pod, wfh, fmt=None): if fmt is None: fmt = os.path.splitext(wfh.name)[1].lower().strip('.') if fmt == 'yaml': return yaml.dump(pod, wfh) elif fmt == 'json': return json.dump(pod, wfh) elif fmt == 'py': raise ValueError('Serializing to Python is not supported') else: raise ValueError('Unknown format "{}": {}'.format(fmt, getattr(wfh, 'name', ''))) def is_pod(obj): if type(obj) not in POD_TYPES: # pylint: disable=unidiomatic-typecheck return False if hasattr(obj, 'items'): for k, v in obj.items(): if not (is_pod(k) and is_pod(v)): return False elif isiterable(obj): for v in obj: if not is_pod(v): return False return True class Podable(object): _pod_serialization_version = 0 @classmethod def from_pod(cls, pod): pod = cls._upgrade_pod(pod) instance = cls() instance._pod_version = pod.pop('_pod_version') # pylint: disable=protected-access return instance @classmethod def _upgrade_pod(cls, pod): _pod_serialization_version = pod.pop('_pod_serialization_version', None) or 0 while _pod_serialization_version < cls._pod_serialization_version: _pod_serialization_version += 1 upgrade = getattr(cls, '_pod_upgrade_v{}'.format(_pod_serialization_version)) pod = upgrade(pod) return pod def __init__(self): self._pod_version = self._pod_serialization_version def to_pod(self): pod = {} pod['_pod_version'] = self._pod_version pod['_pod_serialization_version'] = self._pod_serialization_version return pod ================================================ FILE: wa/utils/terminalsize.py ================================================ # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Adapted from # https://gist.github.com/jtriley/1108174 # pylint: disable=bare-except,unpacking-non-sequence import os import shlex import struct import platform import subprocess def get_terminal_size(): """ getTerminalSize() - get width and height of console - works on linux,os x,windows,cygwin(windows) originally retrieved from: http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python """ current_os = platform.system() tuple_xy = None if current_os == 'Windows': tuple_xy = _get_terminal_size_windows() if tuple_xy is None: # needed for window's python in cygwin's xterm tuple_xy = _get_terminal_size_tput() if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'): tuple_xy = _get_terminal_size_linux() if tuple_xy is None or tuple_xy == (0, 0): tuple_xy = (80, 25) # assume "standard" terminal return tuple_xy def _get_terminal_size_windows(): # pylint: disable=unused-variable,redefined-outer-name,too-many-locals, import-outside-toplevel try: from ctypes import windll, create_string_buffer # stdin handle is -10 # stdout handle is -11 # stderr handle is -12 h = windll.kernel32.GetStdHandle(-12) csbi = create_string_buffer(22) res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi) if res: (bufx, bufy, curx, cury, wattr, left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw) sizex = right - left + 1 sizey = bottom - top + 1 return sizex, sizey except: # NOQA pass def _get_terminal_size_tput(): # get terminal width # src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window try: cols = int(subprocess.check_call(shlex.split('tput cols'))) rows = int(subprocess.check_call(shlex.split('tput lines'))) return (cols, rows) except: # NOQA pass def _get_terminal_size_linux(): # pylint: disable=import-outside-toplevel def ioctl_GWINSZ(fd): try: import fcntl import termios cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) return cr except: # NOQA pass cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) if not cr: try: fd = os.open(os.ctermid(), os.O_RDONLY) cr = ioctl_GWINSZ(fd) os.close(fd) except: # NOQA pass if not cr: try: cr = (os.environ['LINES'], os.environ['COLUMNS']) except: # NOQA return None return int(cr[1]), int(cr[0]) if __name__ == "__main__": sizex, sizey = get_terminal_size() print('width =', sizex, 'height =', sizey) ================================================ FILE: wa/utils/trace_cmd.py ================================================ # Copyright 2015-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import re import logging from itertools import chain from devlib.collector.ftrace import TRACE_MARKER_START, TRACE_MARKER_STOP from wa.utils.misc import isiterable from wa.utils.types import numeric logger = logging.getLogger('trace-cmd') class TraceCmdEvent(object): """ A single trace-cmd event. 
This will appear in the trace cmd report in the format :: -0 [000] 3284.126993: sched_rq_runnable_load: cpu=0 load=54 | | | | |___________| | | | | | thread cpu timestamp name body """ __slots__ = ['thread', 'reporting_cpu_id', 'timestamp', 'name', 'text', 'fields'] def __init__(self, thread, cpu_id, ts, name, body, parser=None): """ parameters: :thread: thread which generated the event :cpu: cpu on which the event has occurred :ts: timestamp of the event :name: the name of the event :bodytext: a string with the rest of the event text :parser: optionally, a function that will parse body text to populate this event's attributes The parser can be any callable that can be invoked with parser(event, text) Where ``event`` is this TraceCmdEvent instance, and ``text`` is the body text to be parsed. The parser should updated the passed event instance and not return anything (the return value will be ignored). Any exceptions raised by the parser will be silently ignored (note that this means that the event's attributes may be partially initialized). """ self.thread = thread self.reporting_cpu_id = int(cpu_id) self.timestamp = numeric(ts) self.name = name self.text = body self.fields = {} if parser: try: parser(self, self.text) except Exception: # pylint: disable=broad-except # unknown format assume user does not care or know how to # parse self.text pass def __getattr__(self, name): try: return self.fields[name] except KeyError: raise AttributeError(name) def __str__(self): return 'TE({} @ {})'.format(self.name, self.timestamp) __repr__ = __str__ class DroppedEventsEvent(object): __slots__ = ['thread', 'reporting_cpu_id', 'timestamp', 'name', 'text', 'fields'] def __init__(self, cpu_id): self.thread = None self.reporting_cpu_id = None self.timestamp = None self.name = 'DROPPED EVENTS DETECTED' self.text = None self.fields = {'cpu_id': int(cpu_id)} def __getattr__(self, name): try: return self.fields[name] except KeyError: raise AttributeError(name) def __str__(self): return 'DROPPED_EVENTS_ON_CPU{}'.format(self.cpu_id) __repr__ = __str__ def try_convert_to_numeric(v): try: if isiterable(v): return list(map(numeric, v)) else: return numeric(v) except ValueError: return v def default_body_parser(event, text): """ Default parser to attempt to use to parser body text for the event (i.e. after the "header" common to all events has been parsed). This assumes that the body is a whitespace-separated list of key=value pairs. The parser will attempt to convert the value into a numeric type, and failing that, keep it as string. """ parts = [e.rsplit(' ', 1) for e in text.strip().split('=')] parts = [p.strip() for p in chain.from_iterable(parts)] if not len(parts) % 2: i = iter(parts) for k, v in zip(i, i): try: v = int(v) except ValueError: pass event.fields[k] = v def regex_body_parser(regex, flags=0): """ Creates an event body parser form the specified regular expression (could be an ``re.RegexObject``, or a string). The regular expression should contain some named groups, as those will be extracted as the event attributes (unnamed groups and the reset of the match will be ignored). If the specified regex is a string, it will be compiled, in which case ``flags`` may be provided for the resulting regex object (see ``re`` standard module documentation). If regex is a pre-compiled object, flags will be ignored. 
""" if isinstance(regex, str): regex = re.compile(regex, flags) def regex_parser_func(event, text): match = regex.search(text) if match: for k, v in match.groupdict().items(): try: event.fields[k] = int(v) except ValueError: event.fields[k] = v return regex_parser_func def sched_switch_parser(event, text): """ Sched switch output may be presented in a couple of different formats. One is handled by a regex. The other format can *almost* be handled by the default parser, if it weren't for the ``==>`` that appears in the middle. """ if text.count('=') == 2: # old format regex = re.compile( r'(?P\S.*):(?P\d+) \[(?P\d+)\] (?P\S+)' r' ==> ' r'(?P\S.*):(?P\d+) \[(?P\d+)\]' ) parser_func = regex_body_parser(regex) return parser_func(event, text) else: # there are more than two "=" -- new format return default_body_parser(event, text.replace('==>', '')) def sched_stat_parser(event, text): """ sched_stat_* events unclude the units, "[ns]", in an otherwise regular key=value sequence; so the units need to be stripped out first. """ return default_body_parser(event, text.replace(' [ns]', '')) def sched_wakeup_parser(event, text): regex = re.compile(r'(?P\S+):(?P\d+) \[(?P\d+)\] success=(?P\d) CPU:(?P\d+)') parse_func = regex_body_parser(regex) return parse_func(event, text) # Maps event onto the corresponding parser for its body text. A parser may be # a callable with signature # # parser(event, bodytext) # # a re.RegexObject, or a string (in which case it will be compiled into a # regex). In case of a string/regex, its named groups will be used to populate # the event's attributes. EVENT_PARSER_MAP = { 'sched_stat_blocked': sched_stat_parser, 'sched_stat_iowait': sched_stat_parser, 'sched_stat_runtime': sched_stat_parser, 'sched_stat_sleep': sched_stat_parser, 'sched_stat_wait': sched_stat_parser, 'sched_switch': sched_switch_parser, 'sched_wakeup': sched_wakeup_parser, 'sched_wakeup_new': sched_wakeup_parser, } TRACE_EVENT_REGEX = re.compile(r'^\s+(?P\S+.*?\S+)\s+\[(?P\d+)\]\s+(?P[\d.]+):\s+' r'(?P[^:]+):\s+(?P.*?)\s*$') HEADER_REGEX = re.compile(r'^\s*(?:version|cpus)\s*=\s*([\d.]+)\s*$') DROPPED_EVENTS_REGEX = re.compile(r'CPU:(?P\d+) \[\d*\s*EVENTS DROPPED\]') EMPTY_CPU_REGEX = re.compile(r'CPU \d+ is empty') class TraceCmdParser(object): """ A parser for textual representation of ftrace as reported by trace-cmd """ def __init__(self, filter_markers=True, check_for_markers=True, events=None): """ Initialize a new trace parser. :param filter_markers: Specifies whether the trace before the start marker and after the stop marker should be filtered out (so only events between the two markers will be reported). This maybe overriden based on `check_for_markers` parameter of `parse()` :param check_for_markers: Check if the start/stop markers are present in the trace and ensure that `filter_markers` is `False` if they aren't :param events: A list of event names to be reported; if not specified, all events will be reported. """ self.filter_markers = filter_markers self.check_for_markers = check_for_markers self.events = events def parse(self, filepath): # pylint: disable=too-many-branches,too-many-locals """ This is a generator for the trace event stream. 
class TraceCmdParser(object):
    """
    A parser for the textual representation of ftrace, as reported by
    trace-cmd.

    """

    def __init__(self, filter_markers=True, check_for_markers=True, events=None):
        """
        Initialize a new trace parser.

        :param filter_markers: Specifies whether the trace before the start
                               marker and after the stop marker should be
                               filtered out (so only events between the two
                               markers will be reported). This may be
                               overridden depending on ``check_for_markers``.
        :param check_for_markers: Check if the start/stop markers are present
                                  in the trace, and ensure that
                                  ``filter_markers`` is ``False`` if they
                                  aren't.
        :param events: A list of event names to be reported; if not specified,
                       all events will be reported.

        """
        self.filter_markers = filter_markers
        self.check_for_markers = check_for_markers
        self.events = events

    def parse(self, filepath):  # pylint: disable=too-many-branches,too-many-locals
        """
        This is a generator for the trace event stream.

        :param filepath: The path to the file containing the text trace, as
                         reported by trace-cmd.

        """
        inside_marked_region = False
        # pylint: disable=superfluous-parens
        filters = [re.compile('^{}$'.format(e)) for e in (self.events or [])]
        filter_markers = self.filter_markers
        if filter_markers and self.check_for_markers:
            with open(filepath) as fh:
                for line in fh:
                    if TRACE_MARKER_START in line:
                        break
                else:
                    # marker not found -- force filtering by markers to False
                    filter_markers = False

        with open(filepath) as fh:
            for line in fh:
                # if processing trace markers, skip marker lines as well as all
                # lines outside the marked region
                if filter_markers:
                    if not inside_marked_region:
                        if TRACE_MARKER_START in line:
                            inside_marked_region = True
                        continue
                    elif TRACE_MARKER_STOP in line:
                        inside_marked_region = False
                        continue

                match = DROPPED_EVENTS_REGEX.search(line)
                if match:
                    yield DroppedEventsEvent(match.group('cpu_id'))
                    continue

                matched = False
                for rx in [HEADER_REGEX, EMPTY_CPU_REGEX]:
                    match = rx.search(line)
                    if match:
                        logger.debug(line.strip())
                        matched = True
                        break
                if matched:
                    continue

                match = TRACE_EVENT_REGEX.search(line)
                if not match:
                    logger.warning('Invalid trace event: "{}"'.format(line))
                    continue

                event_name = match.group('name')

                if filters:
                    found = False
                    for f in filters:
                        if f.search(event_name):
                            found = True
                            break
                    if not found:
                        continue

                body_parser = EVENT_PARSER_MAP.get(event_name, default_body_parser)
                if isinstance(body_parser, (str, re.Pattern)):  # pylint: disable=protected-access
                    body_parser = regex_body_parser(body_parser)

                yield TraceCmdEvent(parser=body_parser, **match.groupdict())


def trace_has_marker(filepath, max_lines_to_check=2000000):
    with open(filepath) as fh:
        for i, line in enumerate(fh):
            if TRACE_MARKER_START in line:
                return True
            if i >= max_lines_to_check:
                break
    return False
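# Editor's note: a usage sketch (not part of the original module), assuming
# a trace-cmd text report has been saved as 'trace.txt':
#
#     path = 'trace.txt'
#     parser = TraceCmdParser(filter_markers=trace_has_marker(path),
#                             events=['sched_switch', 'sched_wakeup'])
#     for event in parser.parse(path):
#         print(event.name, event.timestamp, event.fields)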
""" import os import re import numbers import shlex from bisect import insort from urllib.parse import quote, unquote # pylint: disable=no-name-in-module, import-error # pylint: disable=wrong-import-position from collections import defaultdict from collections.abc import MutableMapping from functools import total_ordering from past.builtins import basestring # pylint: disable=redefined-builtin from future.utils import with_metaclass from devlib.utils.types import identifier, boolean, integer, numeric, caseless_string from wa.utils.misc import (isiterable, list_to_ranges, list_to_mask, mask_to_list, ranges_to_list) def list_of_strs(value): """ Value must be iterable. All elements will be converted to strings. """ if not isiterable(value): raise ValueError(value) return list(map(str, value)) list_of_strings = list_of_strs def list_of_ints(value): """ Value must be iterable. All elements will be converted to ``int``\ s. """ if not isiterable(value): raise ValueError(value) return list(map(int, value)) list_of_integers = list_of_ints def list_of_numbers(value): """ Value must be iterable. All elements will be converted to numbers (either ``ints`` or ``float``\ s depending on the elements). """ if not isiterable(value): raise ValueError(value) return list(map(numeric, value)) def list_of_bools(value, interpret_strings=True): """ Value must be iterable. All elements will be converted to ``bool``\ s. .. note:: By default, ``boolean()`` conversion function will be used, which means that strings like ``"0"`` or ``"false"`` will be interpreted as ``False``. If this is undesirable, set ``interpret_strings`` to ``False``. """ if not isiterable(value): raise ValueError(value) if interpret_strings: return list(map(boolean, value)) else: return list(map(bool, value)) def list_of(type_): """Generates a "list of" callable for the specified type. The callable attempts to convert all elements in the passed value to the specified ``type_``, raising ``ValueError`` on error.""" def __init__(self, values): list.__init__(self, list(map(type_, values))) def append(self, value): list.append(self, type_(value)) def extend(self, other): list.extend(self, list(map(type_, other))) def from_pod(cls, pod): return cls(list(map(type_, pod))) def _to_pod(self): return self def __setitem__(self, idx, value): list.__setitem__(self, idx, type_(value)) return type('list_of_{}s'.format(type_.__name__), (list, ), { "__init__": __init__, "__setitem__": __setitem__, "append": append, "extend": extend, "to_pod": _to_pod, "from_pod": classmethod(from_pod), }) def list_or_string(value): """ Converts the value into a list of strings. If the value is not iterable, a one-element list with stringified value will be returned. """ if isinstance(value, str): return [value] else: try: return list(value) except ValueError: return [str(value)] def list_or_caseless_string(value): """ Converts the value into a list of ``caseless_string``'s. If the value is not iterable a one-element list with stringified value will be returned. """ if isinstance(value, str): return [caseless_string(value)] else: try: return list(map(caseless_string, value)) except ValueError: return [caseless_string(value)] def list_or(type_): """ Generator for "list or" types. These take either a single value or a list values and return a list of the specified ``type_`` performing the conversion on the value (if a single value is specified) or each of the elements of the specified list. 
""" list_type = list_of(type_) class list_or_type(list_type): def __init__(self, value): # pylint: disable=non-parent-init-called,super-init-not-called if isiterable(value): list_type.__init__(self, value) else: list_type.__init__(self, [value]) return list_or_type list_or_integer = list_or(integer) list_or_number = list_or(numeric) list_or_bool = list_or(boolean) regex_type = type(re.compile('')) none_type = type(None) def regex(value): """ Regular expression. If value is a string, it will be complied with no flags. If you want to specify flags, value must be precompiled. """ if isinstance(value, regex_type): return value else: return re.compile(value) def version_tuple(v): """ Converts a version string into a tuple of strings that can be used for natural comparison allowing delimeters of "-" and ".". """ v = v.replace('-', '.') return tuple(map(str, (v.split(".")))) def module_name_set(l): # noqa: E741 """ Converts a list of target modules into a set of module names, disregarding any configuration that may be present. """ modules = set() for m in l: if m and isinstance(m, dict): modules.update(m.keys()) else: modules.add(m) return modules __counters = defaultdict(int) def reset_counter(name=None, value=0): __counters[name] = value def reset_all_counters(value=0): for k in __counters: reset_counter(k, value) def counter(name=None): """ An auto incrementing value (kind of like an AUTO INCREMENT field in SQL). Optionally, the name of the counter to be used is specified (each counter increments separately). Counts start at 1, not 0. """ __counters[name] += 1 value = __counters[name] return value class arguments(list): """ Represents command line arguments to be passed to a program. """ def __init__(self, value=None): if isiterable(value): super(arguments, self).__init__(list(map(str, value))) elif isinstance(value, str): posix = os.name != 'nt' super(arguments, self).__init__(shlex.split(value, posix=posix)) elif value is None: super(arguments, self).__init__() else: super(arguments, self).__init__([str(value)]) def append(self, value): return super(arguments, self).append(str(value)) def extend(self, values): return super(arguments, self).extend(list(map(str, values))) def __str__(self): return ' '.join(self) class prioritylist(object): def __init__(self): """ Returns an OrderedReceivers object that externally behaves like a list but it maintains the order of its elements according to their priority. """ self.elements = defaultdict(list) self.is_ordered = True self.priorities = [] self.size = 0 self._cached_elements = None def add(self, new_element, priority=0): """ adds a new item in the list. 
class prioritylist(object):

    def __init__(self):
        """
        Returns a prioritylist object that externally behaves like a list,
        but maintains the order of its elements according to their priority.
        """
        self.elements = defaultdict(list)
        self.is_ordered = True
        self.priorities = []
        self.size = 0
        self._cached_elements = None

    def add(self, new_element, priority=0):
        """
        Adds a new item to the list.

        - ``new_element`` is the element to be inserted into the prioritylist
        - ``priority`` is the priority of the element, which specifies its
          order within the list
        """
        self._add_element(new_element, priority)

    def add_before(self, new_element, element):
        priority, index = self._priority_index(element)
        self._add_element(new_element, priority, index)

    def add_after(self, new_element, element):
        priority, index = self._priority_index(element)
        self._add_element(new_element, priority, index + 1)

    def index(self, element):
        return self._to_list().index(element)

    def remove(self, element):
        index = self.index(element)
        self.__delitem__(index)

    def _priority_index(self, element):
        for priority, elements in self.elements.items():
            if element in elements:
                return (priority, elements.index(element))
        raise IndexError(element)

    def _to_list(self):
        if self._cached_elements is None:
            self._cached_elements = []
            for priority in self.priorities:
                self._cached_elements += self.elements[priority]
        return self._cached_elements

    def _add_element(self, element, priority, index=None):
        if index is None:
            self.elements[priority].append(element)
        else:
            self.elements[priority].insert(index, element)
        self.size += 1
        self._cached_elements = None
        if priority not in self.priorities:
            insort(self.priorities, priority)

    def _delete(self, priority, priority_index):
        del self.elements[priority][priority_index]
        self.size -= 1
        if not self.elements[priority]:
            self.priorities.remove(priority)
        self._cached_elements = None

    def __iter__(self):
        for priority in reversed(self.priorities):  # highest priority first
            for element in self.elements[priority]:
                yield element

    def __getitem__(self, index):
        return self._to_list()[index]

    def __delitem__(self, index):
        if isinstance(index, numbers.Integral):
            index = int(index)
            if index < 0:
                index_range = [len(self) + index]
            else:
                index_range = [index]
        elif isinstance(index, slice):
            index_range = list(range(index.start or 0, index.stop, index.step or 1))
        else:
            raise ValueError('Invalid index {}'.format(index))
        current_global_offset = 0
        priority_counts = dict(zip(self.priorities,
                                   [len(self.elements[p]) for p in self.priorities]))
        for priority in self.priorities:
            if not index_range:
                break
            priority_offset = 0
            while index_range:
                del_index = index_range[0]
                if priority_counts[priority] + current_global_offset <= del_index:
                    current_global_offset += priority_counts[priority]
                    break
                within_priority_index = del_index - \
                    (current_global_offset + priority_offset)
                self._delete(priority, within_priority_index)
                priority_offset += 1
                index_range.pop(0)

    def __len__(self):
        return self.size
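# Editor's note: an illustrative sketch (not part of the original module).
# Iterating a prioritylist yields the highest-priority elements first,
# preserving insertion order within each priority:
#
#     pl = prioritylist()
#     pl.add('baseline')             # default priority 0
#     pl.add('urgent', priority=10)
#     pl.add('cleanup', priority=-1)
#     list(pl)  # -> ['urgent', 'baseline', 'cleanup']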
""" @staticmethod def from_pod(pod): return toggle_set(pod) @staticmethod def merge(dest, source): if '~~' in source: return toggle_set(source) dest = toggle_set(dest) for item in source: if item not in dest: #Disable previously enabled item if item.startswith('~') and item[1:] in dest: dest.remove(item[1:]) #Enable previously disabled item if not item.startswith('~') and ('~' + item) in dest: dest.remove('~' + item) dest.add(item) return dest def __init__(self, *args): if args: value = args[0] if isinstance(value, str): msg = 'invalid type for toggle_set: "{}"' raise TypeError(msg.format(type(value))) updated_value = [] for v in value: if v.startswith('~') and v[1:] in updated_value: updated_value.remove(v[1:]) elif not v.startswith('~') and ('~' + v) in updated_value: updated_value.remove(('~' + v)) updated_value.append(v) args = tuple([updated_value] + list(args[1:])) set.__init__(self, *args) def merge_with(self, other): return toggle_set.merge(self, other) def merge_into(self, other): return toggle_set.merge(other, self) def add(self, item): if item not in self: #Disable previously enabled item if item.startswith('~') and item[1:] in self: self.remove(item[1:]) #Enable previously disabled item if not item.startswith('~') and ('~' + item) in self: self.remove('~' + item) super(toggle_set, self).add(item) def values(self): """ returns a list of enabled items. """ return {item for item in self if not item.startswith('~')} def conflicts_with(self, other): """ Checks if any items in ``other`` conflict with items already in this list. Args: other (list): The list to be checked against Returns: A list of items in ``other`` that conflict with items in this list """ conflicts = [] for item in other: if item.startswith('~') and item[1:] in self: conflicts.append(item) if not item.startswith('~') and ('~' + item) in self: conflicts.append(item) return conflicts def to_pod(self): return list(self.values()) class ID(str): def merge_with(self, other): return '_'.join([self, other]) def merge_into(self, other): return '_'.join([other, self]) class obj_dict(MutableMapping): """ An object that behaves like a dict but each dict entry can also be accessed as an attribute. 
class obj_dict(MutableMapping):
    """
    An object that behaves like a dict, but each dict entry can also be
    accessed as an attribute.

    :param not_in_dict: A list of keys that can only be accessed as attributes
    """

    @staticmethod
    def from_pod(pod):
        return obj_dict(pod)

    # pylint: disable=super-init-not-called
    def __init__(self, values=None, not_in_dict=None):
        self.__dict__['dict'] = dict(values or {})
        self.__dict__['not_in_dict'] = not_in_dict if not_in_dict is not None else []

    def to_pod(self):
        return self.__dict__['dict']

    def __getitem__(self, key):
        if key in self.not_in_dict:
            msg = '"{}" is in the list of keys that can only be accessed as attributes'
            raise KeyError(msg.format(key))
        return self.__dict__['dict'][key]

    def __setitem__(self, key, value):
        self.__dict__['dict'][key] = value

    def __delitem__(self, key):
        del self.__dict__['dict'][key]

    def __len__(self):
        return sum(1 for _ in self)

    def __iter__(self):
        for key in self.__dict__['dict']:
            if key not in self.__dict__['not_in_dict']:
                yield key

    def __repr__(self):
        return repr(dict(self))

    def __str__(self):
        return str(dict(self))

    def __setattr__(self, name, value):
        self.__dict__['dict'][name] = value

    def __delattr__(self, name):
        if name in self:
            del self.__dict__['dict'][name]
        else:
            raise AttributeError("No such attribute: " + name)

    def __getattr__(self, name):
        if 'dict' not in self.__dict__:
            raise AttributeError("No such attribute: " + name)
        if name in self.__dict__['dict']:
            return self.__dict__['dict'][name]
        else:
            raise AttributeError("No such attribute: " + name)


@total_ordering
class level(object):
    """
    A level has a name and behaves like a string when printed; however, it
    also has a numeric value which is used in ordering comparisons.

    """

    @staticmethod
    def from_pod(pod):
        name, value_part = pod.split('(')
        return level(name, numeric(value_part.rstrip(')')))

    def __init__(self, name, value):
        self.name = caseless_string(name)
        self.value = numeric(value)

    def to_pod(self):
        return repr(self)

    def __str__(self):
        return str(self.name)

    def __repr__(self):
        return '{}({})'.format(self.name, self.value)

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        if isinstance(other, level):
            return self.value == other.value
        elif isinstance(other, basestring):
            return self.name == other
        else:
            return self.value == other

    def __lt__(self, other):
        if isinstance(other, level):
            return self.value < other.value
        elif isinstance(other, basestring):
            return self.name < other
        else:
            return self.value < other

    def __ne__(self, other):
        if isinstance(other, level):
            return self.value != other.value
        elif isinstance(other, basestring):
            return self.name != other
        else:
            return self.value != other


class _EnumMeta(type):

    def __str__(cls):
        return str(cls.levels)

    def __getattr__(cls, name):
        name = name.lower()
        if name in cls.__dict__:
            return cls.__dict__[name]
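# Editor's note: an illustrative sketch (not part of the original module).
# A level compares as a number but prints as a string:
#
#     warning = level('warning', 30)
#     error = level('error', 40)
#     error > warning       # -> True (numeric comparison)
#     warning == 'warning'  # -> True (string comparison by name)
#     str(error)            # -> 'error'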
def enum(args, start=0, step=1):
    """
    Creates a class with attributes named by the first argument. Each
    attribute is a ``level``, so they behave as integers in comparisons.
    The value of the first attribute is specified by the second argument
    (``0`` if not specified).

    ::

        MyEnum = enum(['A', 'B', 'C'])

    is roughly equivalent to::

        class MyEnum(object):
            A = 0
            B = 1
            C = 2

    however it also implements some specialized behaviors for comparisons
    and instantiation.

    """

    class Enum(with_metaclass(_EnumMeta, object)):

        @classmethod
        def from_pod(cls, pod):
            lv = level.from_pod(pod)
            for enum_level in cls.levels:
                if enum_level == lv:
                    return enum_level
            msg = 'Unexpected value "{}" for enum.'
            raise ValueError(msg.format(pod))

        def __new__(cls, name):
            for attr_name in dir(cls):
                if attr_name.startswith('__'):
                    continue
                attr = getattr(cls, attr_name)
                if name == attr:
                    return attr
            try:
                return Enum.from_pod(name)
            except ValueError:
                raise ValueError('Invalid enum value: {}'.format(repr(name)))

    reserved = ['values', 'levels', 'names']

    levels = []
    n = start
    for v in args:
        id_v = identifier(v)
        if id_v in reserved:
            message = 'Invalid enum level name "{}"; must not be in {}'
            raise ValueError(message.format(v, reserved))
        name = caseless_string(id_v)
        lv = level(v, n)
        setattr(Enum, name, lv)
        levels.append(lv)
        n += step

    setattr(Enum, 'levels', levels)
    setattr(Enum, 'values', [lvl.value for lvl in levels])
    setattr(Enum, 'names', [lvl.name for lvl in levels])

    return Enum
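# Editor's note: an illustrative sketch (not part of the original module):
#
#     Status = enum(['ok', 'partial', 'failed'])
#     Status.ok                            # level with value 0
#     Status.failed > Status.ok            # -> True (levels compare numerically)
#     Status('partial') is Status.partial  # -> True (instantiation by name)
#     Status.names                         # -> ['ok', 'partial', 'failed']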
class ParameterDict(dict):
    """
    A dict-like object that automatically encodes various types into a
    url-safe string, and enforces a single type for the contents of a list.
    Each value is first prefixed with 2 letters to preserve its type when
    encoding to a string. The format used is "value_type, value_dimension"
    e.g. a 'list of floats' would become 'fl'.

    """

    # Function to determine the appropriate prefix based on the parameter's type
    @staticmethod
    def _get_prefix(obj):
        if isinstance(obj, str):
            prefix = 's'
        elif isinstance(obj, float):
            prefix = 'f'
        elif isinstance(obj, bool):
            prefix = 'b'
        elif isinstance(obj, int):
            prefix = 'i'
        elif obj is None:
            prefix = 'n'
        else:
            raise ValueError('Unable to encode {} {}'.format(obj, type(obj)))
        return prefix

    # Function to add a prefix and urlencode a provided parameter.
    @staticmethod
    def _encode(obj):
        if isinstance(obj, list):
            t = type(obj[0])
            prefix = ParameterDict._get_prefix(obj[0]) + 'l'
            for item in obj:
                if not isinstance(item, t):
                    msg = 'Lists must only contain a single type, contains {} and {}'
                    raise ValueError(msg.format(t, type(item)))
            obj = '0newelement0'.join(str(x) for x in obj)
        else:
            prefix = ParameterDict._get_prefix(obj) + 's'
        return quote(prefix + str(obj))

    # Function to decode a string and return a value of the original parameter type.
    # pylint: disable=too-many-return-statements
    @staticmethod
    def _decode(string):
        value_type = string[:1]
        value_dimension = string[1:2]
        value = unquote(string[2:])
        if value_dimension == 's':
            if value_type == 's':
                return str(value)
            elif value_type == 'b':
                return boolean(value)
            elif value_type == 'd':
                return int(value)
            elif value_type == 'f':
                return float(value)
            elif value_type == 'i':
                return int(value)
            elif value_type == 'n':
                return None
        elif value_dimension == 'l':
            return [ParameterDict._decode(value_type + 's' + x)
                    for x in value.split('0newelement0')]
        else:
            raise ValueError('Unknown {} {}'.format(type(string), string))

    def __init__(self, *args, **kwargs):
        for k, v in kwargs.items():
            self.__setitem__(k, v)
        dict.__init__(self, *args)

    def __setitem__(self, name, value):
        dict.__setitem__(self, name, self._encode(value))

    def __getitem__(self, name):
        return self._decode(dict.__getitem__(self, name))

    def __contains__(self, item):
        return dict.__contains__(self, self._encode(item))

    def __iter__(self):
        return iter((k, self._decode(v)) for (k, v) in list(self.items()))

    def iteritems(self):
        return self.__iter__()

    def get(self, name):
        return self._decode(dict.get(self, name))

    def pop(self, key):
        return self._decode(dict.pop(self, key))

    def popitem(self):
        key, value = dict.popitem(self)
        return (key, self._decode(value))

    def iter_encoded_items(self):
        return dict.items(self)

    def get_encoded_value(self, name):
        return dict.__getitem__(self, name)

    def values(self):
        return [self[k] for k in dict.keys(self)]

    def update(self, *args, **kwargs):
        for d in list(args) + [kwargs]:
            for k, v in d.items():
                self[k] = v


class cpu_mask(object):
    """
    A class to allow for a consistent way of representing a cpu mask, with
    methods to provide conversions between the various required forms. The
    mask can be specified directly as a mask, as a list of cpu indexes, or as
    a sysfs-style string.

    """

    @staticmethod
    def from_pod(pod):
        return cpu_mask(int(pod['cpu_mask']))

    def __init__(self, cpus):
        self._mask = 0
        if isinstance(cpus, int):
            self._mask = cpus
        elif isinstance(cpus, str):
            if cpus[:2] == '0x' or cpus[:2] == '0X':
                self._mask = int(cpus, 16)
            else:
                self._mask = list_to_mask(ranges_to_list(cpus))
        elif isinstance(cpus, list):
            self._mask = list_to_mask(cpus)
        elif isinstance(cpus, cpu_mask):
            self._mask = cpus._mask  # pylint: disable=protected-access
        else:
            msg = 'Unknown conversion from {} to cpu mask'
            raise ValueError(msg.format(cpus))

    def __bool__(self):
        """Allow for use in comparisons to check if a mask has been set"""
        return bool(self._mask)

    __nonzero__ = __bool__

    def __repr__(self):
        return 'cpu_mask: {}'.format(self.mask())

    __str__ = __repr__

    def list(self):
        """Returns a list of the indexes of bits that are set in the mask."""
        return list(reversed(mask_to_list(self._mask)))

    def mask(self, prefix=True):
        """Returns a hex representation of the mask, with an optional prefix"""
        if prefix:
            return hex(self._mask)
        else:
            return hex(self._mask)[2:]

    def ranges(self):
        """Returns a sysfs-style ranges string"""
        return list_to_ranges(self.list())

    def to_pod(self):
        return {'cpu_mask': self._mask}
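# Editor's note: illustrative sketches (not part of the original module):
#
#     pd = ParameterDict()
#     pd['rate'] = 1.5
#     pd.get_encoded_value('rate')  # -> 'fs1.5' (type-prefixed, url-quoted)
#     pd['rate']                    # -> 1.5 (decoded back to a float)
#
#     mask = cpu_mask([0, 1, 4])
#     mask.mask()    # -> '0x13'
#     mask.ranges()  # -> '0-1,4' (sysfs-style string)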

================================================
FILE: wa/workloads/__init__.py
================================================


================================================
FILE: wa/workloads/adobereader/__init__.py
================================================
# Copyright 2014-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

from wa import ApkUiautoWorkload, Parameter
from wa.utils.types import list_of_strs
from wa.framework.exception import ValidationError


class AdobeReader(ApkUiautoWorkload):

    name = 'adobereader'
    package_names = ['com.adobe.reader']
    description = '''
    The Adobe Reader workflow carries out the following typical productivity
    tasks.

    Test description:

    1. Open a local file on the device
    2. Gestures test:
        2.1. Swipe down across the central 50% of the screen in 200 x 5ms steps
        2.2. Swipe up across the central 50% of the screen in 200 x 5ms steps
        2.3. Swipe right from the edge of the screen in 50 x 5ms steps
        2.4. Swipe left from the edge of the screen in 50 x 5ms steps
        2.5. Pinch out 50% in 100 x 5ms steps
        2.6. Pinch in 50% in 100 x 5ms steps
    3. Search test: Search ``document_name`` for each string in the
       ``search_string_list``
    4. Close the document

    Known working APK version: 19.7.1.10709
    '''

    default_search_strings = [
        'The quick brown fox jumps over the lazy dog',
        'TEST_SEARCH_STRING',
    ]

    parameters = [
        Parameter('document_name', kind=str, default='uxperf_test_doc.pdf',
                  description='''
                  The document name to use for the Gesture and Search tests.
                  '''),
        Parameter('search_string_list', kind=list_of_strs,
                  default=default_search_strings,
                  constraint=lambda x: len(x) > 0,
                  description='''
                  For each string in the list, a document search is performed
                  using the string as the search term. At least one must be
                  provided.
                  '''),
    ]

    def __init__(self, target, **kwargs):
        super(AdobeReader, self).__init__(target, **kwargs)
        self.deployable_assets = [self.document_name]
        self.asset_directory = self.target.path.join(self.target.external_storage,
                                                     'Android', 'data',
                                                     'com.adobe.reader', 'files')

    def init_resources(self, context):
        super(AdobeReader, self).init_resources(context)
        # Only accept certain file formats
        if os.path.splitext(self.document_name.lower())[1] not in ['.pdf']:
            raise ValidationError('{} must be a PDF file'.format(self.document_name))
        self.gui.uiauto_params['filename'] = self.document_name
        self.gui.uiauto_params['search_string_list'] = self.search_string_list

    def setup(self, context):
        super(AdobeReader, self).setup(context)
        # Need to re-deploy the assets to the Adobe folder each time, as it is
        # wiped when the app is cleared
        self.deploy_assets(context)


================================================
FILE: wa/workloads/adobereader/uiauto/app/build.gradle
================================================
apply plugin: 'com.android.application'

def packageName = "com.arm.wa.uiauto.adobereader"

android {
    compileSdkVersion 28
    buildToolsVersion '25.0.0'
    defaultConfig {
        applicationId "${packageName}"
        minSdkVersion 18
        targetSdkVersion 28
        testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
    }
    buildTypes {
        applicationVariants.all { variant ->
            variant.outputs.each { output ->
                output.outputFileName = "${packageName}.apk"
            }
        }
    }
}

dependencies {
    implementation fileTree(include: ['*.jar'], dir: 'libs')
    implementation 'com.android.support.test:runner:0.5'
    implementation 'com.android.support.test:rules:0.5'
    implementation 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'
    implementation(name: 'uiauto', ext: 'aar')
}

repositories {
    flatDir {
        dirs 'libs'
    }
}


================================================
FILE: wa/workloads/adobereader/uiauto/app/src/main/AndroidManifest.xml
================================================


================================================
FILE: wa/workloads/adobereader/uiauto/app/src/main/java/com/arm/wa/uiauto/adobereader/UiAutomation.java
================================================
package com.arm.wa.uiauto.adobereader;

/* Copyright 2014-2017 ARM Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import android.os.Bundle;
import android.support.test.runner.AndroidJUnit4;
import android.support.test.uiautomator.UiObject;
import android.support.test.uiautomator.UiObjectNotFoundException;
import android.support.test.uiautomator.UiSelector;

import com.arm.wa.uiauto.ApplaunchInterface;
import com.arm.wa.uiauto.UxPerfUiAutomation.GestureTestParams;
import com.arm.wa.uiauto.UxPerfUiAutomation.GestureType;
import com.arm.wa.uiauto.BaseUiAutomation;
import com.arm.wa.uiauto.ActionLogger;
import com.arm.wa.uiauto.UiAutoUtils;

import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;

import static com.arm.wa.uiauto.BaseUiAutomation.FindByCriteria.BY_DESC;
import static com.arm.wa.uiauto.BaseUiAutomation.FindByCriteria.BY_ID;
import static com.arm.wa.uiauto.BaseUiAutomation.FindByCriteria.BY_TEXT;

@RunWith(AndroidJUnit4.class)
public class UiAutomation extends BaseUiAutomation implements ApplaunchInterface {

    private long networkTimeout = TimeUnit.SECONDS.toMillis(20);
    private long searchTimeout = TimeUnit.SECONDS.toMillis(20);

    protected Bundle parameters;
    protected String packageID;
    protected String filename;
    protected String[] searchStrings;

    @Before
    public void initialize() {
        parameters = getParams();
        packageID = getPackageID(parameters);
        filename = parameters.getString("filename");
        searchStrings = parameters.getStringArray("search_string_list");
    }

    @Test
    public void setup() throws Exception {
        runApplicationSetup();
    }

    @Test
    public void runWorkload() throws Exception {
        openFile(filename);
        gesturesTest();
        searchPdfTest(searchStrings);
        exitDocument();
    }

    @Test
    public void teardown() throws Exception {
        unsetScreenOrientation();
    }

    public void runApplicationSetup() throws Exception {
        setScreenOrientation(ScreenOrientation.NATURAL);
        dismissWelcomeView();
    }

    // Returns the launch command for the application.
    public String getLaunchCommand() {
        String launch_command;
        launch_command = UiAutoUtils.createLaunchCommand(parameters);
        return launch_command;
    }

    // Pass the workload parameters, used for applaunch
    public void setWorkloadParameters(Bundle workload_parameters) {
        parameters = workload_parameters;
        packageID = getPackageID(parameters);
    }

    // Sets the UiObject that marks the end of the application launch.
    public UiObject getLaunchEndObject() {
        UiObject launchEndObject =
            mDevice.findObject(new UiSelector().textContains("RECENT")
                   .className("android.widget.TextView"));
        return launchEndObject;
    }

    private void dismissWelcomeView() throws Exception {
        // Close the optional sign-in screen on newer versions (19.4.0.9813)
        UiObject closeWelcomeImage =
            mDevice.findObject(new UiSelector().resourceId(packageID + "optional_signing_cross_button")
                   .className("android.widget.ImageView"));
        if (closeWelcomeImage.exists()) {
            closeWelcomeImage.click();
        }

        // Deal with the popup dialog message tutorial on newer versions
        UiObject tutorialDialog =
            mDevice.findObject(new UiSelector().resourceId(packageID + "close_card_button")
                   .className("android.widget.ImageButton"));
        if (tutorialDialog.waitForExists(TimeUnit.SECONDS.toMillis(3))) {
            tutorialDialog.click();
        }

        // Check to see if the app is on the home screen
        if (mDevice.findObject(new UiSelector().textContains("Home")).exists()) {
            return;
        }

        // Support older versions (last known working: 16.1)
        UiObject welcomeView = getUiObjectByResourceId("android:id/content",
                                                       "android.widget.FrameLayout");
        welcomeView.swipeLeft(10);
        welcomeView.swipeLeft(10);

        UiObject onboarding_finish_button =
            mDevice.findObject(new UiSelector().resourceId("com.adobe.reader:id/onboarding_finish_button"));
        if (!onboarding_finish_button.exists()) {
            welcomeView.swipeLeft(10);
        }
        if (onboarding_finish_button.exists()) {
            clickUiObject(BY_ID, packageID + "onboarding_finish_button", "android.widget.Button");
        }

        // Deal with the popup dialog message promoting Dropbox access
        UiObject dropBoxDialog =
            mDevice.findObject(new UiSelector().text("Now you can access your Dropbox files.")
                   .className("android.widget.TextView"));
        if (dropBoxDialog.exists()) {
            clickUiObject(BY_TEXT, "Remind Me Later", "android.widget.Button");
        }

        // Also deal with the Dropbox CoachMark blue hint popup
        UiObject dropBoxcoachMark =
            mDevice.findObject(new UiSelector().description("CoachMark")
                   .className("android.widget.LinearLayout"));
        if (dropBoxcoachMark.exists()) {
            tapDisplayCentre();
        }

        UiObject actionBarTitle =
            mDevice.findObject(new UiSelector().textContains("My Documents")
                   .className("android.widget.TextView"));
        actionBarTitle.waitForExists(uiAutoTimeout);
    }

    private void openFile(final String filename) throws Exception {
        String testTag = "open_document";
        ActionLogger logger = new ActionLogger(testTag, parameters);

        UiObject fileObject = findFileObject(filename);
        logger.start();
        fileObject.clickAndWaitForNewWindow(uiAutoTimeout);
        // Wait for the doc to open by waiting for the viewPager UiObject to exist
        UiObject viewPager =
            mDevice.findObject(new UiSelector().resourceId(packageID + "viewPager"));
        if (!viewPager.waitForExists(uiAutoTimeout)) {
            throw new UiObjectNotFoundException("Could not find \"viewPager\".");
        }
        logger.stop();
    }

    private UiObject findFileObject(String filename) throws Exception {
        UiObject localFilesTab =
            mDevice.findObject(new UiSelector().textContains("LOCAL")
                   .className("android.widget.TextView"));
        // Support older versions
        if (localFilesTab.exists()) {
            localFilesTab.click();
            UiObject directoryPath =
                mDevice.findObject(new UiSelector().resourceId(packageID + "directoryPath"));
            if (!directoryPath.waitForExists(TimeUnit.SECONDS.toMillis(60))) {
                throw new UiObjectNotFoundException("Could not find any local files");
            }

            // Click the button to search from the present file list view
            UiObject searchButton =
                mDevice.findObject(new UiSelector().resourceId(packageID + "split_pane_search"));
            if (!searchButton.waitForExists(TimeUnit.SECONDS.toMillis(10))) {
                throw new UiObjectNotFoundException("Could not find search button");
            }
            searchButton.click();

            // Force a refresh of the files before searching
            uiDeviceSwipe(Direction.DOWN, 100);
            // Repeat, as the first swipe is sometimes ignored.
            uiDeviceSwipe(Direction.DOWN, 100);

            // Enter search text into the file searchBox. This will
            // automatically filter the list.
            UiObject searchBox =
                mDevice.findObject(new UiSelector().resourceIdMatches(".*search_src_text")
                       .classNameMatches("android.widget.Edit.*"));
            searchBox.setText(filename);

            // Open a file from a file list view by searching for UiObjects
            // containing the doc title.
            return getUiObjectByText(filename, "android.widget.TextView");
        }

        // Support for newer versions
        UiObject searchNavigationButton =
            mDevice.findObject(new UiSelector().resourceIdMatches(packageID + "bottombaritem_search")
                   .className("android.widget.FrameLayout"));
        // On devices with larger screen sizes, the layout is different
        if (!searchNavigationButton.exists()) {
            searchNavigationButton = getUiObjectByResourceId(packageID + "search_button_home",
                                                             "android.widget.TextView");
        }
        searchNavigationButton.click();

        UiObject searchBox =
            mDevice.findObject(new UiSelector().resourceIdMatches(".*search_src_text")
                   .classNameMatches("android.widget.EditText"));
        searchBox.click();
        searchBox.setText(filename);
        mDevice.pressEnter();
        // Remove the file extension
        return getUiObjectByText(filename.substring(0, filename.lastIndexOf(".")),
                                 "android.widget.TextView");
    }

    private void gesturesTest() throws Exception {
        String testTag = "gesture";

        // Perform a range of swipe tests at different speeds and on different views
        LinkedHashMap<String, GestureTestParams> testParams =
            new LinkedHashMap<String, GestureTestParams>();
        testParams.put("swipe_up", new GestureTestParams(GestureType.UIDEVICE_SWIPE, Direction.UP, 100));
        testParams.put("swipe_down", new GestureTestParams(GestureType.UIDEVICE_SWIPE, Direction.DOWN, 100));
        testParams.put("swipe_right", new GestureTestParams(GestureType.UIOBJECT_SWIPE, Direction.RIGHT, 50));
        testParams.put("swipe_left", new GestureTestParams(GestureType.UIOBJECT_SWIPE, Direction.LEFT, 50));
        testParams.put("pinch_out", new GestureTestParams(GestureType.PINCH, PinchType.OUT, 100, 50));
        testParams.put("pinch_in", new GestureTestParams(GestureType.PINCH, PinchType.IN, 100, 50));

        Iterator<Entry<String, GestureTestParams>> it = testParams.entrySet().iterator();

        // On some devices the first device swipe is ignored, so perform it here
        // to prevent the first test gesture from being incorrectly logged
        uiDeviceSwipe(Direction.DOWN, 200);

        UiObject view =
            mDevice.findObject(new UiSelector().resourceId(packageID + "pageView"));
        if (!view.waitForExists(TimeUnit.SECONDS.toMillis(10))) {
            throw new UiObjectNotFoundException("Could not find page view");
        }

        while (it.hasNext()) {
            Entry<String, GestureTestParams> pair = it.next();
            GestureType type = pair.getValue().gestureType;
            Direction dir = pair.getValue().gestureDirection;
            PinchType pinch = pair.getValue().pinchType;
            int steps = pair.getValue().steps;
            int percent = pair.getValue().percent;

            String runName = String.format(testTag + "_" + pair.getKey());
            ActionLogger logger = new ActionLogger(runName, parameters);

            logger.start();
            switch (type) {
                case UIDEVICE_SWIPE:
                    uiDeviceSwipe(dir, steps);
                    break;
                case UIOBJECT_SWIPE:
                    uiObjectSwipe(view, dir, steps);
                    break;
                case PINCH:
                    uiObjectVertPinch(view, pinch, steps, percent);
                    break;
                default:
                    break;
            }
            logger.stop();
        }
    }

    private void searchPdfTest(final String[] searchStrings) throws Exception {
        String testTag = "search";

        // Tap the centre to bring up the menu gui.
        // Sometimes the first tap won't register, so check if search appears
        // and, if not,
        // tap again before continuing.
        tapDisplayCentre();
        UiObject searchIcon =
            mDevice.findObject(new UiSelector().resourceId(packageID + "document_view_search_icon"));
        if (!searchIcon.waitForExists(uiAutoTimeout)) {
            tapDisplayCentre();
        }
        if (!searchIcon.waitForExists(uiAutoTimeout)) {
            searchIcon = mDevice.findObject(new UiSelector().resourceId(packageID + "document_view_search"));
            if (!searchIcon.waitForExists(uiAutoTimeout)) {
                tapDisplayCentre();
            }
        }

        for (int i = 0; i < searchStrings.length; i++) {
            String runName = String.format(testTag + "_string" + i);
            ActionLogger logger = new ActionLogger(runName, parameters);

            // Click on the search button icon and enter text in the box. This
            // closes the keyboard, so click the box again and press Enter to
            // start the search.
            searchIcon.clickAndWaitForNewWindow();

            UiObject searchBox =
                mDevice.findObject(new UiSelector().resourceIdMatches(".*search_src_text")
                       .className("android.widget.EditText"));
            searchBox.setText(searchStrings[i]);

            logger.start();
            pressEnter();

            // Check the progress bar icon. When this disappears, the search is complete.
            UiObject progressBar =
                mDevice.findObject(new UiSelector().resourceId(packageID + "searchProgress")
                       .className("android.widget.ProgressBar"));
            progressBar.waitForExists(uiAutoTimeout);
            progressBar.waitUntilGone(searchTimeout);
            logger.stop();

            mDevice.pressBack();
        }
    }

    private void exitDocument() throws Exception {
        // Return from the document view to the file list view by pressing
        // home and "My Documents".
        UiObject homeButton =
            mDevice.findObject(new UiSelector().resourceId("android:id/home")
                   .className("android.widget.ImageView"));
        // Newer versions of the app have a menu button instead of a home button.
        UiObject menuButton =
            mDevice.findObject(new UiSelector().description("Navigate up"));
        if (!(homeButton.exists() || menuButton.exists())) {
            tapDisplayCentre();
        }
        if (homeButton.exists()) {
            homeButton.click();
        } else if (menuButton.exists()) {
            menuButton.click();
        } else {
            mDevice.pressBack();
        }
        UiObject searchBackButton =
            mDevice.findObject(new UiSelector().description("Collapse")
                   .className("android.widget.ImageButton"));
        if (searchBackButton.exists()) {
            searchBackButton.click();
        } else {
            mDevice.pressBack();
        }
    }
}


================================================
FILE: wa/workloads/adobereader/uiauto/build.gradle
================================================
// Top-level build file where you can add configuration options common to all sub-projects/modules.

buildscript {
    repositories {
        jcenter()
        google()
    }
    dependencies {
        classpath 'com.android.tools.build:gradle:7.2.1'

        // NOTE: Do not place your application dependencies here; they belong
        // in the individual module build.gradle files
    }
}

allprojects {
    repositories {
        jcenter()
        google()
    }
}

task clean(type: Delete) {
    delete rootProject.buildDir
}


================================================
FILE: wa/workloads/adobereader/uiauto/build.sh
================================================
#!/bin/bash
# Copyright 2013-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

set -e

# CD into the build dir if possible - allows building from any directory
script_path='.'
if `readlink -f $0 &>/dev/null`; then
    script_path=`readlink -f $0 2>/dev/null`
fi
script_dir=`dirname $script_path`
cd $script_dir

# Ensure gradlew exists before starting
if [[ ! -f gradlew ]]; then
    echo 'gradlew file not found! Check that you are in the right directory.'
    exit 9
fi

# Copy the base class library from the wa dist
libs_dir=app/libs
base_class=`python3 -c "import os, wa; print(os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar'))"`
mkdir -p $libs_dir
cp $base_class $libs_dir

# Build, and return an appropriate exit code if the build failed
# gradle build
./gradlew clean :app:assembleDebug
exit_code=$?
if [[ $exit_code -ne 0 ]]; then
    echo "ERROR: 'gradle build' exited with code $exit_code"
    exit $exit_code
fi

# If successful, move the APK file to the workload folder (overwrite previous)
package=com.arm.wa.uiauto.adobereader
rm -f ../$package.apk
if [[ -f app/build/outputs/apk/debug/$package.apk ]]; then
    cp app/build/outputs/apk/debug/$package.apk ../$package.apk
else
    echo 'ERROR: UiAutomator apk could not be found!'
    exit 9
fi


================================================
FILE: wa/workloads/adobereader/uiauto/gradle/wrapper/gradle-wrapper.properties
================================================
#Wed May 03 15:42:44 BST 2017
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip


================================================
FILE: wa/workloads/adobereader/uiauto/gradlew
================================================
#!/usr/bin/env bash

##############################################################################
##
##  Gradle start up script for UN*X
##
##############################################################################

# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""

APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`

# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"

warn ( ) {
    echo "$*"
}

die ( ) {
    echo
    echo "$*"
    echo
    exit 1
}

# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
case "`uname`" in
  CYGWIN* )
    cygwin=true
    ;;
  Darwin* )
    darwin=true
    ;;
  MINGW* )
    msys=true
    ;;
esac

# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
    ls=`ls -ld "$PRG"`
    link=`expr "$ls" : '.*-> \(.*\)$'`
    if expr "$link" : '/.*' > /dev/null; then
        PRG="$link"
    else
        PRG=`dirname "$PRG"`"/$link"
    fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null

CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar

# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
        # IBM's JDK on AIX uses strange locations for the executables
        JAVACMD="$JAVA_HOME/jre/sh/java"
    else
        JAVACMD="$JAVA_HOME/bin/java"
    fi
    if [ ! -x "$JAVACMD" ] ; then
        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
    fi
else
    JAVACMD="java"
    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi

# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
    MAX_FD_LIMIT=`ulimit -H -n`
    if [ $? -eq 0 ] ; then
        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
            MAX_FD="$MAX_FD_LIMIT"
        fi
        ulimit -n $MAX_FD
        if [ $? -ne 0 ] ; then
            warn "Could not set maximum file descriptor limit: $MAX_FD"
        fi
    else
        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
    fi
fi

# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi

# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
    JAVACMD=`cygpath --unix "$JAVACMD"`

    # We build the pattern for arguments to be converted via cygpath
    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
    SEP=""
    for dir in $ROOTDIRSRAW ; do
        ROOTDIRS="$ROOTDIRS$SEP$dir"
        SEP="|"
    done
    OURCYGPATTERN="(^($ROOTDIRS))"
    # Add a user-defined pattern to the cygpath arguments
    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
    fi
    # Now convert the arguments - kludge to limit ourselves to /bin/sh
    i=0
    for arg in "$@" ; do
        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
        CHECK2=`echo "$arg"|egrep -c "^-"`                                 ### Determine if an option

        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then                    ### Added a condition
            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
        else
            eval `echo args$i`="\"$arg\""
        fi
        i=$((i+1))
    done
    case $i in
        (0) set -- ;;
        (1) set -- "$args0" ;;
        (2) set -- "$args0" "$args1" ;;
        (3) set -- "$args0" "$args1" "$args2" ;;
        (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
        (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
        (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
        (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
        (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
        (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
    esac
fi

# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
function splitJvmOpts() {
    JVM_OPTS=("$@")
}
eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"

exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"


================================================
FILE: wa/workloads/adobereader/uiauto/gradlew.bat
================================================
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem  Gradle startup script for Windows
@rem
@rem ##########################################################################

@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal

@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=

set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%

@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome

set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init

echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe

if exist "%JAVA_EXE%" goto init

echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:init
@rem Get command-line arguments, handling Windowz variants

if not "%OS%" == "Windows_NT" goto win9xME_args
if "%@eval[2+2]" == "4" goto 4NT_args

:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2

:win9xME_args_slurp
if "x%~1" == "x" goto execute

set CMD_LINE_ARGS=%*
goto execute

:4NT_args
@rem Get arguments from the 4NT Shell from JP Software
set CMD_LINE_ARGS=%$

:execute
@rem Setup the command line

set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar

@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%

:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd

:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1

:mainEnd
if "%OS%"=="Windows_NT" endlocal

:omega


================================================
FILE: wa/workloads/adobereader/uiauto/settings.gradle
================================================
include ':app'


================================================
FILE: wa/workloads/aitutu/__init__.py
================================================
# Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import re

from wa import ApkUiautoWorkload
from wa.framework.exception import WorkloadError


class Aitutu(ApkUiautoWorkload):

    name = 'aitutu'
    package_names = ['com.antutu.aibenchmark']
    regex_matches = [
        re.compile(r'Overall Score ([\d.]+)'),
        re.compile(r'Image Total Score ([\d.]+) ([\w]+) ([\w]+)'),
        re.compile(r'Image Speed Score ([\d.]+) ([\w]+) ([\w]+)'),
        re.compile(r'Image Accuracy Score ([\d.]+) ([\w]+) ([\w]+)'),
        re.compile(r'Object Total Score ([\d.]+) ([\w]+) ([\w]+)'),
        re.compile(r'Object Speed Score ([\d.]+) ([\w]+) ([\w]+)'),
        re.compile(r'Object Accuracy Score ([\d.]+) ([\w]+) ([\w]+)'),
    ]
    description = '''
    Executes the Aitutu Image Speed/Accuracy and Object Speed/Accuracy tests.

    The Aitutu workflow carries out the following tasks.

    1. Open the Aitutu application
    2. Download the resources for the test
    3. Execute the tests

    Known working APK version: 1.0.3
    '''

    requires_network = True

    def __init__(self, target, **kwargs):
        super(Aitutu, self).__init__(target, **kwargs)
        self.gui.timeout = 1200000

    def update_output(self, context):
        super(Aitutu, self).update_output(context)
        expected_results = len(self.regex_matches)
        logcat_file = context.get_artifact_path('logcat')
        with open(logcat_file, errors='replace') as fh:
            for line in fh:
                for regex in self.regex_matches:
                    match = regex.search(line)
                    if match:
                        classifiers = {}
                        result = match.group(1)
                        if (len(match.groups())) > 1:
                            entry = regex.pattern.rsplit(None, 3)[0]
                            classifiers = {'model': match.group(3)}
                        else:
                            entry = regex.pattern.rsplit(None, 1)[0]
                        context.add_metric(entry, result, '',
                                           lower_is_better=False,
                                           classifiers=classifiers)
                        expected_results -= 1
        if expected_results > 0:
            detected = len(self.regex_matches) - expected_results
            msg = 'The Aitutu workload has failed. Expected {} scores, detected {} scores.'
            raise WorkloadError(msg.format(len(self.regex_matches), detected))


================================================
FILE: wa/workloads/aitutu/uiauto/app/build.gradle
================================================
apply plugin: 'com.android.application'

def packageName = "com.arm.wa.uiauto.aitutu"

android {
    compileSdkVersion 28
    buildToolsVersion '25.0.0'
    defaultConfig {
        applicationId "${packageName}"
        minSdkVersion 18
        targetSdkVersion 28
        testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
    }
    buildTypes {
        applicationVariants.all { variant ->
            variant.outputs.each { output ->
                output.outputFileName = "${packageName}.apk"
            }
        }
    }
}

dependencies {
    implementation fileTree(include: ['*.jar'], dir: 'libs')
    implementation 'com.android.support.test:runner:0.5'
    implementation 'com.android.support.test:rules:0.5'
    implementation 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'
    implementation(name: 'uiauto', ext: 'aar')
}

repositories {
    flatDir {
        dirs 'libs'
    }
}


================================================
FILE: wa/workloads/aitutu/uiauto/app/src/main/AndroidManifest.xml
================================================


================================================
FILE: wa/workloads/aitutu/uiauto/app/src/main/java/com/arm/wa/uiauto/aitutu/UiAutomation.java
================================================
/* Copyright 2013-2018 ARM Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.arm.wa.uiauto.aitutu;

import android.app.Activity;
import android.os.Bundle;
import android.graphics.Rect;
import android.support.test.runner.AndroidJUnit4;
import android.support.test.uiautomator.UiObject;
import android.support.test.uiautomator.UiObjectNotFoundException;
import android.support.test.uiautomator.UiSelector;
import android.support.test.uiautomator.UiScrollable;
import android.view.KeyEvent;
import android.util.Log;

import com.arm.wa.uiauto.BaseUiAutomation;

import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;

import java.util.concurrent.TimeUnit;

@RunWith(AndroidJUnit4.class)
public class UiAutomation extends BaseUiAutomation {

    public static String TAG = "UXPERF";

    @Test
    public void setup() throws Exception {
        clearPopups();
        downloadAssets();
    }

    @Test
    public void runWorkload() throws Exception {
        runBenchmark();
    }

    @Test
    public void extractResults() throws Exception {
        getScores();
    }

    public void clearPopups() throws Exception {
        UiObject agreement = mDevice.findObject(new UiSelector().textContains("NEXT"));
        agreement.waitForExists(5000);
        if (agreement.exists()) {
            agreement.click();
        }

        UiSelector selector = new UiSelector();
        UiObject cancel = mDevice.findObject(selector.textContains("CANCEL")
                                 .className("android.widget.Button"));
        cancel.waitForExists(60000);
        if (cancel.exists()) {
            cancel.click();
        }
    }

    public void downloadAssets() throws Exception {
        UiSelector selector = new UiSelector();

        // Start the tests
        UiObject start = mDevice.findObject(selector.textContains("Start Testing")
                                .className("android.widget.TextView"));
        waitObject(start);
        start.click();

        UiObject check = mDevice.findObject(selector.textContains("classification")
                                .className("android.widget.TextView"));
        waitObject(check);
    }

    public void runBenchmark() throws Exception {
        UiSelector selector = new UiSelector();

        // Wait for the tests to complete
        UiObject complete = mDevice.findObject(selector.text("TEST AGAIN")
                                   .className("android.widget.Button"));
        complete.waitForExists(1200000);
    }

    public void getScores() throws Exception {
        mDevice.waitForIdle(5000);
        UiSelector selector = new UiSelector();

        // Declare the models used
        UiObject imageMod = mDevice.findObject(selector.resourceId("com.antutu.aibenchmark:id/recyclerView"))
                                   .getChild(selector.index(1))
                                   .getChild(selector.resourceId("com.antutu.aibenchmark:id/textViewAIModelName"));
        UiObject objectMod = mDevice.findObject(selector.resourceId("com.antutu.aibenchmark:id/recyclerView"))
                                    .getChild(selector.index(4))
                                    .getChild(selector.resourceId("com.antutu.aibenchmark:id/textViewAIModelName"));

        // Log the scores and models
        UiObject totalScore = mDevice.findObject(selector.resourceId("com.antutu.aibenchmark:id/textViewTotalScore"));
        Log.d(TAG, "Overall Score " + totalScore.getText());

        UiObject imageTotal = mDevice.findObject(selector.resourceId("com.antutu.aibenchmark:id/recyclerView"))
                                     .getChild(selector.index(1))
                                     .getChild(selector.resourceId("com.antutu.aibenchmark:id/textViewSIDScore"));
        Log.d(TAG, "Image Total Score " + imageTotal.getText() + " Model " + imageMod.getText());

        UiObject imageSpeed = mDevice.findObject(selector.resourceId("com.antutu.aibenchmark:id/recyclerView"))
                                     .getChild(selector.index(2))
                                     .getChild(selector.resourceId("com.antutu.aibenchmark:id/textViewBIDScore"));
        Log.d(TAG, "Image Speed Score " + imageSpeed.getText() + " Model " + imageMod.getText());

        UiObject imageAcc = mDevice.findObject(selector.resourceId("com.antutu.aibenchmark:id/recyclerView"))
                                   .getChild(selector.index(3))
.getChild(selector.resourceId("com.antutu.aibenchmark:id/textViewBIDScore"));
        Log.d(TAG, "Image Accuracy Score " + imageAcc.getText() + " Model " + imageMod.getText());

        UiObject objectTotal = mDevice.findObject(selector.resourceId("com.antutu.aibenchmark:id/recyclerView"))
                .getChild(selector.index(4))
                .getChild(selector.resourceId("com.antutu.aibenchmark:id/textViewSIDScore"));
        Log.d(TAG, "Object Total Score " + objectTotal.getText() + " Model " + objectMod.getText());

        UiObject objectSpeed = mDevice.findObject(selector.resourceId("com.antutu.aibenchmark:id/recyclerView"))
                .getChild(selector.index(5))
                .getChild(selector.resourceId("com.antutu.aibenchmark:id/textViewBIDScore"));
        Log.d(TAG, "Object Speed Score " + objectSpeed.getText() + " Model " + objectMod.getText());

        UiObject objectAcc = mDevice.findObject(selector.resourceId("com.antutu.aibenchmark:id/recyclerView"))
                .getChild(selector.index(6))
                .getChild(selector.resourceId("com.antutu.aibenchmark:id/textViewBIDScore"));
        Log.d(TAG, "Object Accuracy Score " + objectAcc.getText() + " Model " + objectMod.getText());
    }
}


================================================
FILE: wa/workloads/aitutu/uiauto/build.gradle
================================================
// Top-level build file where you can add configuration options common to all sub-projects/modules.

buildscript {
    repositories {
        jcenter()
        google()
    }
    dependencies {
        classpath 'com.android.tools.build:gradle:7.2.1'

        // NOTE: Do not place your application dependencies here; they belong
        // in the individual module build.gradle files
    }
}

allprojects {
    repositories {
        jcenter()
        google()
    }
}

task clean(type: Delete) {
    delete rootProject.buildDir
}


================================================
FILE: wa/workloads/aitutu/uiauto/build.sh
================================================
#!/bin/bash
# Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

set -e

# CD into build dir if possible - allows building from any directory
script_path='.'
if `readlink -f $0 &>/dev/null`; then
    script_path=`readlink -f $0 2>/dev/null`
fi
script_dir=`dirname $script_path`
cd $script_dir

# Ensure gradlew exists before starting
if [[ ! -f gradlew ]]; then
    echo 'gradlew file not found! Check that you are in the right directory.'
    exit 9
fi

# Copy base class library from wa dist
libs_dir=app/libs
base_class=`python3 -c "import os, wa; print(os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar'))"`
mkdir -p $libs_dir
cp $base_class $libs_dir

# Build and return appropriate exit code if failed
# gradle build
./gradlew clean :app:assembleDebug
exit_code=$?
if [[ $exit_code -ne 0 ]]; then
    echo "ERROR: 'gradle build' exited with code $exit_code"
    exit $exit_code
fi

# If successful move APK file to workload folder (overwrite previous)
package=com.arm.wa.uiauto.aitutu
rm -f ../$package.apk
if [[ -f app/build/outputs/apk/debug/$package.apk ]]; then
    cp app/build/outputs/apk/debug/$package.apk ../$package.apk
else
    echo 'ERROR: UiAutomator apk could not be found!'
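    # (The path checked above assumes the :app:assembleDebug variant built
    # earlier in this script; building a different variant would change the
    # app/build/outputs/apk/debug output directory.)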
exit 9 fi ================================================ FILE: wa/workloads/aitutu/uiauto/gradle/wrapper/gradle-wrapper.properties ================================================ #Wed May 03 15:42:44 BST 2017 distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip ================================================ FILE: wa/workloads/aitutu/uiauto/gradlew ================================================ #!/usr/bin/env bash ############################################################################## ## ## Gradle start up script for UN*X ## ############################################################################## # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS="" APP_NAME="Gradle" APP_BASE_NAME=`basename "$0"` # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD="maximum" warn ( ) { echo "$*" } die ( ) { echo echo "$*" echo exit 1 } # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false case "`uname`" in CYGWIN* ) cygwin=true ;; Darwin* ) darwin=true ;; MINGW* ) msys=true ;; esac # Attempt to set APP_HOME # Resolve links: $0 may be a link PRG="$0" # Need this for relative symlinks. while [ -h "$PRG" ] ; do ls=`ls -ld "$PRG"` link=`expr "$ls" : '.*-> \(.*\)$'` if expr "$link" : '/.*' > /dev/null; then PRG="$link" else PRG=`dirname "$PRG"`"/$link" fi done SAVED="`pwd`" cd "`dirname \"$PRG\"`/" >/dev/null APP_HOME="`pwd -P`" cd "$SAVED" >/dev/null CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar # Determine the Java command to use to start the JVM. if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables JAVACMD="$JAVA_HOME/jre/sh/java" else JAVACMD="$JAVA_HOME/bin/java" fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else JAVACMD="java" which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi # Increase the maximum file descriptors if we can. if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then MAX_FD_LIMIT=`ulimit -H -n` if [ $? -eq 0 ] ; then if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then MAX_FD="$MAX_FD_LIMIT" fi ulimit -n $MAX_FD if [ $? 
-ne 0 ] ; then warn "Could not set maximum file descriptor limit: $MAX_FD" fi else warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" fi fi # For Darwin, add options to specify how the application appears in the dock if $darwin; then GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" fi # For Cygwin, switch paths to Windows format before running java if $cygwin ; then APP_HOME=`cygpath --path --mixed "$APP_HOME"` CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` JAVACMD=`cygpath --unix "$JAVACMD"` # We build the pattern for arguments to be converted via cygpath ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` SEP="" for dir in $ROOTDIRSRAW ; do ROOTDIRS="$ROOTDIRS$SEP$dir" SEP="|" done OURCYGPATTERN="(^($ROOTDIRS))" # Add a user-defined pattern to the cygpath arguments if [ "$GRADLE_CYGPATTERN" != "" ] ; then OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" fi # Now convert the arguments - kludge to limit ourselves to /bin/sh i=0 for arg in "$@" ; do CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` else eval `echo args$i`="\"$arg\"" fi i=$((i+1)) done case $i in (0) set -- ;; (1) set -- "$args0" ;; (2) set -- "$args0" "$args1" ;; (3) set -- "$args0" "$args1" "$args2" ;; (4) set -- "$args0" "$args1" "$args2" "$args3" ;; (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; esac fi # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules function splitJvmOpts() { JVM_OPTS=("$@") } eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" ================================================ FILE: wa/workloads/aitutu/uiauto/gradlew.bat ================================================ @if "%DEBUG%" == "" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @rem @rem ########################################################################## @rem Set local scope for the variables with windows NT shell if "%OS%"=="Windows_NT" setlocal @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. set DEFAULT_JVM_OPTS= set DIRNAME=%~dp0 if "%DIRNAME%" == "" set DIRNAME=. set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 if "%ERRORLEVEL%" == "0" goto init echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :findJavaFromJavaHome set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe if exist "%JAVA_EXE%" goto init echo. 
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail

:init
@rem Get command-line arguments, handling Windows variants

if not "%OS%" == "Windows_NT" goto win9xME_args
if "%@eval[2+2]" == "4" goto 4NT_args

:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2

:win9xME_args_slurp
if "x%~1" == "x" goto execute

set CMD_LINE_ARGS=%*
goto execute

:4NT_args
@rem Get arguments from the 4NT Shell from JP Software
set CMD_LINE_ARGS=%$

:execute
@rem Setup the command line

set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar

@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%

:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd

:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1

:mainEnd
if "%OS%"=="Windows_NT" endlocal

:omega


================================================
FILE: wa/workloads/androbench/uiauto/settings.gradle
================================================
include ':app'


================================================
FILE: wa/workloads/androbench/__init__.py
================================================
# Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import re

from wa import ApkUiautoWorkload
from wa.framework.exception import WorkloadError


class Androbench(ApkUiautoWorkload):

    name = 'androbench'
    package_names = ['com.andromeda.androbench2']
    regex_matches = [re.compile(r'Sequential Read Score ([\d.]+)'),
                     re.compile(r'Sequential Write Score ([\d.]+)'),
                     re.compile(r'Random Read Score ([\d.]+)'),
                     re.compile(r'Random Write Score ([\d.]+)'),
                     re.compile(r'SQL Insert Score ([\d.]+)'),
                     re.compile(r'SQL Update Score ([\d.]+)'),
                     re.compile(r'SQL Delete Score ([\d.]+)')]
    description = '''
    Executes storage performance benchmarks

    The Androbench workload carries out the following tasks.

    1. Open Androbench application
    2. Execute all storage benchmarks

    Known working APK version: 5.0.1
    '''

    def update_output(self, context):
        super(Androbench, self).update_output(context)
        expected_results = len(self.regex_matches)
        logcat_file = context.get_artifact_path('logcat')
        with open(logcat_file, errors='replace') as fh:
            for line in fh:
                for regex in self.regex_matches:
                    match = regex.search(line)
                    if match:
                        result = float(match.group(1))
                        entry = regex.pattern.rsplit(None, 1)[0]
                        context.add_metric(entry, result, 'MB/s', lower_is_better=False)
                        expected_results -= 1
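        # A sketch of the logcat lines being parsed here (value illustrative):
        # the companion uiauto APK logs entries such as
        #
        #     D/UXPERF ( 1234): Sequential Read Score 1234.56
        #
        # group(1) captures the number, while pattern.rsplit(None, 1)[0]
        # recovers the metric name ('Sequential Read Score') from the regex
        # pattern itself.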
        if expected_results > 0:
            detected = len(self.regex_matches) - expected_results
            msg = "The Androbench workload has failed. Expected {} scores, detected {} scores."
            raise WorkloadError(msg.format(len(self.regex_matches), detected))


================================================
FILE: wa/workloads/androbench/uiauto/app/build.gradle
================================================
apply plugin: 'com.android.application'

def packageName = "com.arm.wa.uiauto.androbench"

android {
    compileSdkVersion 28
    buildToolsVersion '25.0.0'
    defaultConfig {
        applicationId "${packageName}"
        minSdkVersion 18
        targetSdkVersion 28
        testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
    }
    buildTypes {
        applicationVariants.all { variant ->
            variant.outputs.each { output ->
                output.outputFileName = "${packageName}.apk"
            }
        }
    }
}

dependencies {
    implementation fileTree(include: ['*.jar'], dir: 'libs')
    implementation 'com.android.support.test:runner:0.5'
    implementation 'com.android.support.test:rules:0.5'
    implementation 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'
    implementation(name: 'uiauto', ext: 'aar')
}

repositories {
    flatDir {
        dirs 'libs'
    }
}


================================================
FILE: wa/workloads/androbench/uiauto/app/src/main/AndroidManifest.xml
================================================


================================================
FILE: wa/workloads/androbench/uiauto/app/src/main/java/com/arm/wa/uiauto/androbench/UiAutomation.java
================================================
/* Copyright 2013-2018 ARM Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package com.arm.wa.uiauto.androbench; import android.app.Activity; import android.os.Bundle; import android.graphics.Rect; import android.support.test.runner.AndroidJUnit4; import android.support.test.uiautomator.UiObject; import android.support.test.uiautomator.UiObjectNotFoundException; import android.support.test.uiautomator.UiSelector; import android.support.test.uiautomator.UiScrollable; import android.view.KeyEvent; import android.util.Log; import com.arm.wa.uiauto.BaseUiAutomation; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import java.util.concurrent.TimeUnit; @RunWith(AndroidJUnit4.class) public class UiAutomation extends BaseUiAutomation { public static String TAG = "UXPERF"; @Test public void setup() throws Exception { dismissPermissions(); dismissAndroidVersionPopup(); } @Test public void dismissPermissions() throws Exception { UiSelector selector = new UiSelector(); UiObject cont = mDevice.findObject(selector.textContains("Continue")); if (cont.exists()) { cont.click(); } } @Test public void runWorkload() throws Exception { runBenchmark(); } @Test public void extractResults() throws Exception { getScores(); } public void runBenchmark() throws Exception { UiSelector selector = new UiSelector(); UiObject btn_microbench = mDevice.findObject(selector.textContains("Micro") .className("android.widget.Button")); if (btn_microbench.exists()) { btn_microbench.click(); } else { UiObject bench = mDevice.findObject(new UiSelector().resourceIdMatches("com.andromeda.androbench2:id/btnStartingBenchmarking")); Rect bounds = bench.getBounds(); mDevice.click(bounds.centerX(), bounds.centerY()); } UiObject btn_yes= mDevice.findObject(selector.textContains("Yes") .className("android.widget.Button")); btn_yes.click(); UiObject complete_text = mDevice.findObject(selector.text("Cancel") .className("android.widget.Button")); waitObject(complete_text); sleep(2); complete_text.click(); } public void getScores() throws Exception { UiSelector selector = new UiSelector(); UiObject seqRead = mDevice.findObject(selector.text("Sequential Read").fromParent(selector.index(1))); UiObject seqWrite = mDevice.findObject(selector.text("Sequential Write").fromParent(selector.index(1))); UiObject ranRead = mDevice.findObject(selector.text("Random Read").fromParent(selector.index(1))); UiObject ranWrite = mDevice.findObject(selector.text("Random Write").fromParent(selector.index(1))); UiObject sqlInsert = mDevice.findObject(selector.text("SQLite Insert").fromParent(selector.index(1))); UiObject sqlUpdate = mDevice.findObject(selector.text("SQLite Update").fromParent(selector.index(1))); UiObject sqlDelete = mDevice.findObject(selector.text("SQLite Delete").fromParent(selector.index(1))); UiScrollable scrollView = new UiScrollable(new UiSelector().scrollable(true)); Log.d(TAG, "Sequential Read Score " + seqRead.getText()); if (scrollView.exists()){scrollView.scrollIntoView(seqWrite); } Log.d(TAG, "Sequential Write Score " + seqWrite.getText()); if (scrollView.exists()){scrollView.scrollIntoView(ranRead);} Log.d(TAG, "Random Read Score " + ranRead.getText()); if (scrollView.exists()){scrollView.scrollIntoView(ranWrite);} Log.d(TAG, "Random Write Score " + ranWrite.getText()); if (scrollView.exists()){scrollView.scrollIntoView(sqlInsert);} Log.d(TAG, "SQL Insert Score " + sqlInsert.getText()); if (scrollView.exists()){scrollView.scrollIntoView(sqlUpdate);} Log.d(TAG, "SQL Update Score " + sqlUpdate.getText()); if (scrollView.exists()){scrollView.scrollIntoView(sqlDelete);} 
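        // (Each row is scrolled into view before getText() is called on it:
        // reading an off-screen UiObject would typically throw
        // UiObjectNotFoundException on smaller displays.)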
Log.d(TAG, "SQL Delete Score " + sqlDelete.getText()); } } ================================================ FILE: wa/workloads/androbench/uiauto/build.gradle ================================================ // Top-level build file where you can add configuration options common to all sub-projects/modules. buildscript { repositories { jcenter() google() } dependencies { classpath 'com.android.tools.build:gradle:7.2.1' // NOTE: Do not place your application dependencies here; they belong // in the individual module build.gradle files } } allprojects { repositories { jcenter() google() } } task clean(type: Delete) { delete rootProject.buildDir } ================================================ FILE: wa/workloads/androbench/uiauto/build.sh ================================================ #!/bin/bash # Copyright 2013-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # set -e # CD into build dir if possible - allows building from any directory script_path='.' if `readlink -f $0 &>/dev/null`; then script_path=`readlink -f $0 2>/dev/null` fi script_dir=`dirname $script_path` cd $script_dir # Ensure gradelw exists before starting if [[ ! -f gradlew ]]; then echo 'gradlew file not found! Check that you are in the right directory.' exit 9 fi # Copy base class library from wa dist libs_dir=app/libs base_class=`python3 -c "import os, wa; print(os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar'))"` mkdir -p $libs_dir cp $base_class $libs_dir # Build and return appropriate exit code if failed # gradle build ./gradlew clean :app:assembleDebug exit_code=$? if [[ $exit_code -ne 0 ]]; then echo "ERROR: 'gradle build' exited with code $exit_code" exit $exit_code fi # If successful move APK file to workload folder (overwrite previous) package=com.arm.wa.uiauto.androbench rm -f ../$package if [[ -f app/build/outputs/apk/debug/$package.apk ]]; then cp app/build/outputs/apk/debug/$package.apk ../$package.apk else echo 'ERROR: UiAutomator apk could not be found!' exit 9 fi ================================================ FILE: wa/workloads/androbench/uiauto/gradle/wrapper/gradle-wrapper.properties ================================================ #Wed May 03 15:42:44 BST 2017 distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip ================================================ FILE: wa/workloads/androbench/uiauto/gradlew ================================================ #!/usr/bin/env bash ############################################################################## ## ## Gradle start up script for UN*X ## ############################################################################## # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS="" APP_NAME="Gradle" APP_BASE_NAME=`basename "$0"` # Use the maximum available, or set MAX_FD != -1 to use that value. 
MAX_FD="maximum" warn ( ) { echo "$*" } die ( ) { echo echo "$*" echo exit 1 } # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false case "`uname`" in CYGWIN* ) cygwin=true ;; Darwin* ) darwin=true ;; MINGW* ) msys=true ;; esac # Attempt to set APP_HOME # Resolve links: $0 may be a link PRG="$0" # Need this for relative symlinks. while [ -h "$PRG" ] ; do ls=`ls -ld "$PRG"` link=`expr "$ls" : '.*-> \(.*\)$'` if expr "$link" : '/.*' > /dev/null; then PRG="$link" else PRG=`dirname "$PRG"`"/$link" fi done SAVED="`pwd`" cd "`dirname \"$PRG\"`/" >/dev/null APP_HOME="`pwd -P`" cd "$SAVED" >/dev/null CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar # Determine the Java command to use to start the JVM. if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables JAVACMD="$JAVA_HOME/jre/sh/java" else JAVACMD="$JAVA_HOME/bin/java" fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else JAVACMD="java" which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi # Increase the maximum file descriptors if we can. if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then MAX_FD_LIMIT=`ulimit -H -n` if [ $? -eq 0 ] ; then if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then MAX_FD="$MAX_FD_LIMIT" fi ulimit -n $MAX_FD if [ $? -ne 0 ] ; then warn "Could not set maximum file descriptor limit: $MAX_FD" fi else warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" fi fi # For Darwin, add options to specify how the application appears in the dock if $darwin; then GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" fi # For Cygwin, switch paths to Windows format before running java if $cygwin ; then APP_HOME=`cygpath --path --mixed "$APP_HOME"` CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` JAVACMD=`cygpath --unix "$JAVACMD"` # We build the pattern for arguments to be converted via cygpath ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` SEP="" for dir in $ROOTDIRSRAW ; do ROOTDIRS="$ROOTDIRS$SEP$dir" SEP="|" done OURCYGPATTERN="(^($ROOTDIRS))" # Add a user-defined pattern to the cygpath arguments if [ "$GRADLE_CYGPATTERN" != "" ] ; then OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" fi # Now convert the arguments - kludge to limit ourselves to /bin/sh i=0 for arg in "$@" ; do CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` else eval `echo args$i`="\"$arg\"" fi i=$((i+1)) done case $i in (0) set -- ;; (1) set -- "$args0" ;; (2) set -- "$args0" "$args1" ;; (3) set -- "$args0" "$args1" "$args2" ;; (4) set -- "$args0" "$args1" "$args2" "$args3" ;; (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" 
;; esac fi # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules function splitJvmOpts() { JVM_OPTS=("$@") } eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" ================================================ FILE: wa/workloads/androbench/uiauto/gradlew.bat ================================================ @if "%DEBUG%" == "" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @rem @rem ########################################################################## @rem Set local scope for the variables with windows NT shell if "%OS%"=="Windows_NT" setlocal @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. set DEFAULT_JVM_OPTS= set DIRNAME=%~dp0 if "%DIRNAME%" == "" set DIRNAME=. set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 if "%ERRORLEVEL%" == "0" goto init echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :findJavaFromJavaHome set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe if exist "%JAVA_EXE%" goto init echo. echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :init @rem Get command-line arguments, handling Windowz variants if not "%OS%" == "Windows_NT" goto win9xME_args if "%@eval[2+2]" == "4" goto 4NT_args :win9xME_args @rem Slurp the command line arguments. set CMD_LINE_ARGS= set _SKIP=2 :win9xME_args_slurp if "x%~1" == "x" goto execute set CMD_LINE_ARGS=%* goto execute :4NT_args @rem Get arguments from the 4NT Shell from JP Software set CMD_LINE_ARGS=%$ :execute @rem Setup the command line set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar @rem Execute Gradle "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% :end @rem End local scope for the variables with windows NT shell if "%ERRORLEVEL%"=="0" goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 exit /b 1 :mainEnd if "%OS%"=="Windows_NT" endlocal :omega ================================================ FILE: wa/workloads/androbench/uiauto/settings.gradle ================================================ include ':app' ================================================ FILE: wa/workloads/angrybirds_rio/__init__.py ================================================ # Copyright 2013-2017 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from wa import ApkReventWorkload class AngryBirdsRio(ApkReventWorkload): name = 'angrybirds_rio' package_names = ['com.rovio.angrybirdsrio'] description = """ Angry Birds Rio game. The sequel to the very popular Android 2D game. """ ================================================ FILE: wa/workloads/antutu/__init__.py ================================================ # Copyright 2014-2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import re import os import time from wa import ApkUiautoWorkload, ApkWorkload, WorkloadError, Parameter, ApkFile, File class Antutu(ApkUiautoWorkload): name = 'antutu' package_names = ['com.antutu.ABenchMark'] regex_matches_v7 = [re.compile(r'CPU Maths Score (.+)'), re.compile(r'CPU Common Score (.+)'), re.compile(r'CPU Multi Score (.+)'), re.compile(r'GPU Marooned Score (.+)'), re.compile(r'GPU Coastline Score (.+)'), re.compile(r'GPU Refinery Score (.+)'), re.compile(r'Data Security Score (.+)'), re.compile(r'Data Processing Score (.+)'), re.compile(r'Image Processing Score (.+)'), re.compile(r'User Experience Score (.+)'), re.compile(r'RAM Score (.+)'), re.compile(r'ROM Score (.+)')] regex_matches_v8 = [re.compile(r'CPU Mathematical Operations Score (.+)'), re.compile(r'CPU Common Algorithms Score (.+)'), re.compile(r'CPU Multi-Core Score (.+)'), re.compile(r'GPU Terracotta Score (.+)'), re.compile(r'GPU Coastline Score (.+)'), re.compile(r'GPU Refinery Score (.+)'), re.compile(r'Data Security Score (.+)'), re.compile(r'Data Processing Score (.+)'), re.compile(r'Image Processing Score (.+)'), re.compile(r'User Experience Score (.+)'), re.compile(r'RAM Access Score (.+)'), re.compile(r'ROM APP IO Score (.+)'), re.compile(r'ROM Sequential Read Score (.+)'), re.compile(r'ROM Sequential Write Score (.+)'), re.compile(r'ROM Random Access Score (.+)')] regex_matches_v9 = [re.compile(r'CPU Mathematical Operations Score (.+)'), re.compile(r'CPU Common Algorithms Score (.+)'), re.compile(r'CPU Multi-Core Score (.+)'), re.compile(r'GPU Terracotta Score (.+)'), re.compile(r'GPU Swordsman Score (.+)'), re.compile(r'GPU Refinery Score (.+)'), re.compile(r'Data Security Score (.+)'), re.compile(r'Data Processing Score (.+)'), re.compile(r'Image Processing Score (.+)'), re.compile(r'User Experience Score (.+)'), re.compile(r'Video CTS Score (.+)'), re.compile(r'Video Decode Score (.+)'), re.compile(r'RAM Access Score (.+)'), re.compile(r'ROM APP IO Score (.+)'), re.compile(r'ROM Sequential Read Score (.+)'), re.compile(r'ROM Sequential Write Score (.+)'), re.compile(r'ROM Random Access Score (.+)')] regex_matches_v10 = [re.compile(r'CPU Mathematical Operations Score 
(.+)'),
                          re.compile(r'CPU Common Algorithms Score (.+)'),
                          re.compile(r'CPU Multi-Core Score (.+)'),
                          re.compile(r'GPU Seasons Score (.+)'),
                          re.compile(r'GPU Coastline2 Score (.+)'),
                          re.compile(r'RAM Bandwidth Score (.+)'),
                          re.compile(r'RAM Latency Score (.+)'),
                          re.compile(r'ROM APP IO Score (.+)'),
                          re.compile(r'ROM Sequential Read Score (.+)'),
                          re.compile(r'ROM Sequential Write Score (.+)'),
                          re.compile(r'ROM Random Access Score (.+)'),
                          re.compile(r'Data Security Score (.+)'),
                          re.compile(r'Data Processing Score (.+)'),
                          re.compile(r'Document Processing Score (.+)'),
                          re.compile(r'Image Decoding Score (.+)'),
                          re.compile(r'Image Processing Score (.+)'),
                          re.compile(r'User Experience Score (.+)'),
                          re.compile(r'Video CTS Score (.+)'),
                          re.compile(r'Video Decoding Score (.+)'),
                          re.compile(r'Video Editing Score (.+)')]
    description = '''
    Executes Antutu 3D, UX, CPU and Memory tests

    Test description:
    1. Open Antutu application
    2. Execute Antutu benchmark

    Known working APK version: 8.0.4
    '''

    supported_versions = ['7.0.4', '7.2.0', '8.0.4', '8.1.9', '8.4.5', '9.1.6', '9.2.9',
                          '10.0.1-OB1', '10.0.6-OB6', '10.1.9', '10.2.1', '10.4.3']

    parameters = [
        Parameter('version', kind=str, allowed_values=supported_versions, override=True,
                  description='''
                  Specify the version of Antutu to be run.
                  If not specified, the latest available version will be used.
                  ''')
    ]

    def __init__(self, device, **kwargs):
        super(Antutu, self).__init__(device, **kwargs)
        self.gui.timeout = 1200

    def initialize(self, context):
        super(Antutu, self).initialize(context)
        #Install the supporting benchmark
        supporting_apk = context.get_resource(ApkFile(self, package='com.antutu.benchmark.full'))
        self.target.install(supporting_apk)
        #Ensure the orientation is set to portrait
        self.target.set_rotation(0)

    def setup(self, context):
        self.gui.uiauto_params['version'] = self.version
        super(Antutu, self).setup(context)

    def extract_scores(self, context, regex_version):
        #pylint: disable=no-self-use, too-many-locals
        cpu = []
        gpu = []
        ux = []
        mem = []
        expected_results = len(regex_version)
        logcat_file = context.get_artifact_path('logcat')
        with open(logcat_file, errors='replace') as fh:
            for line in fh:
                for regex in regex_version:
                    match = regex.search(line)
                    if match:
                        try:
                            result = float(match.group(1))
                        except ValueError:
                            result = float('NaN')
                        entry = regex.pattern.rsplit(None, 1)[0]
                        context.add_metric(entry, result, lower_is_better=False)
                        #Bucket the individual scores into their groups; the
                        #group totals are summed once parsing has finished.
                        if 'CPU' in entry:
                            cpu.append(result)
                        if 'GPU' in entry:
                            gpu.append(result)
                        if any(i in entry for i in ['Data', 'Document', 'Image', 'User', 'Video']):
                            ux.append(result)
                        if any(i in entry for i in ['RAM', 'ROM']):
                            mem.append(result)
                        expected_results -= 1
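        # Worked example (illustrative value): for a logcat line such as
        #
        #     D/UXPERF ( 1234): CPU Multi-Core Score 45678
        #
        # the pattern r'CPU Multi-Core Score (.+)' yields result 45678.0 and
        # entry 'CPU Multi-Core Score', which lands in the cpu bucket above.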
        if expected_results > 0:
            detected = len(regex_version) - expected_results
            msg = "The Antutu workload has failed. Expected {} scores, detected {} scores."
            raise WorkloadError(msg.format(len(regex_version), detected))

        #Calculate group scores
        cpu_result = sum(cpu)
        gpu_result = sum(gpu)
        ux_result = sum(ux)
        mem_result = sum(mem)
        context.add_metric('CPU Total Score', cpu_result, lower_is_better=False)
        context.add_metric('GPU Total Score', gpu_result, lower_is_better=False)
        context.add_metric('UX Total Score', ux_result, lower_is_better=False)
        context.add_metric('MEM Total Score', mem_result, lower_is_better=False)

        #Calculate overall scores
        overall_result = float(cpu_result + gpu_result + ux_result + mem_result)
        context.add_metric('Overall Score', overall_result, lower_is_better=False)

    def update_output(self, context):
        super(Antutu, self).update_output(context)
        if self.version.startswith('10'):
            self.extract_scores(context, self.regex_matches_v10)
        if self.version.startswith('9'):
            self.extract_scores(context, self.regex_matches_v9)
        if self.version.startswith('8'):
            self.extract_scores(context, self.regex_matches_v8)
        if self.version.startswith('7'):
            self.extract_scores(context, self.regex_matches_v7)


class AntutuBDP(ApkWorkload):

    name = "antutu_bdp"
    description = '''
    Workload for executing the BDP versions of the Antutu APK. This will only
    work with specific APKs provided by Antutu, but it unlocks command line
    automation and the capturing of a result file, as opposed to using UiAuto
    and regex parsing.

    Known working version: 10.4.3-domesticAndroidFullBdp
    '''
    activity = 'com.android.module.app.ui.start.ABenchMarkStart --ez isExternal true --es whereTo "test"'
    package_names = ['com.antutu.ABenchMark']

    def initialize(self, context):
        super(AntutuBDP, self).initialize(context)
        #Set the files and directories we need
        self.test_dir = os.path.join(self.target.external_storage_app_dir,
                                     'com.antutu.ABenchMark', 'files', '.antutu')
        self.settings_xml = context.get_resource(File(self, 'settings.xml'))
        self.result_file = os.path.join(self.target.external_storage,
                                        'Documents', 'antutu', 'last_result.json')
        self.output_file = os.path.join(context.output_directory, 'antutu_results.json')
        self.supporting_apk = context.get_resource(ApkFile(self, package='com.antutu.benchmark.full'))

    def setup(self, context):
        super(AntutuBDP, self).setup(context)
        #Install the supporting benchmark
        self.logger.info("Installing the supporting APK")
        self.target.install(self.supporting_apk)
        #Launch the apk to initialize the test dir, then kill it
        self.target.execute('am start {}/com.android.module.app.ui.test.activity.ActivityScoreBench'.format(self.apk.package))
        self.target.execute('am force-stop {}'.format(self.apk.package))
        #Copy the settings.xml to the test dir
        self.target.push(self.settings_xml, self.test_dir)
        #Ensure the orientation is set to portrait
        self.target.set_rotation(0)
        #Remove any pre-existing test results
        if self.target.file_exists(self.result_file):
            self.target.execute('rm {}'.format(self.result_file))

    def run(self, context):
        super(AntutuBDP, self).run(context)
        #Launch the tests
        self.target.execute('am start -n {}/{}'.format(self.apk.package, self.activity))
        #Wait 10 minutes, then begin polling every 30s for the test result to appear
        self.logger.debug("Waiting 10 minutes before starting to poll for the results file.")
        time.sleep(600)
        #Poll for another 15 minutes, 25 minutes total before timing out
        end_time = time.time() + 900
        while time.time() < end_time:
            if self.target.file_exists(self.result_file):
                self.logger.debug("Result file found.")
                return True
            time.sleep(30)
            self.logger.debug("File not found yet. Continuing polling.")
        self.logger.warning("File not found within the configured timeout period. Exiting test.")
        return False

    def update_output(self, context):
        super(AntutuBDP, self).update_output(context)
        self.target.pull(self.result_file, self.output_file)
        context.add_artifact('antutu_result', self.output_file, kind='data',
                             description='Antutu output from target')

    def teardown(self, context):
        super(AntutuBDP, self).teardown(context)
        #Remove the test results file
        self.target.execute('rm {}'.format(self.result_file))
        #Remove the supporting APK, identified by its package name rather than
        #the host-side path to the APK file
        if self.target.is_installed('com.antutu.benchmark.full'):
            self.target.uninstall('com.antutu.benchmark.full')


================================================
FILE: wa/workloads/antutu/uiauto/app/build.gradle
================================================
apply plugin: 'com.android.application'

def packageName = "com.arm.wa.uiauto.antutu"

android {
    compileSdkVersion 28
    buildToolsVersion "28.0.3"
    defaultConfig {
        applicationId "${packageName}"
        minSdkVersion 18
        targetSdkVersion 28
        versionCode 1
        versionName "1.0"
        testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
    }
    buildTypes {
        release {
            minifyEnabled false
            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
        }
        applicationVariants.all { variant ->
            variant.outputs.each { output ->
                output.outputFileName = "${packageName}.apk"
            }
        }
    }
}

dependencies {
    implementation fileTree(dir: 'libs', include: ['*.jar'])
    implementation 'com.android.support.test:runner:0.5'
    implementation 'com.android.support.test:rules:0.5'
    implementation 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'
    implementation(name: 'uiauto', ext:'aar')
}

repositories {
    flatDir {
        dirs 'libs'
    }
}


================================================
FILE: wa/workloads/antutu/uiauto/app/src/main/AndroidManifest.xml
================================================


================================================
FILE: wa/workloads/antutu/uiauto/app/src/main/java/com/arm/wa/uiauto/antutu/UiAutomation.java
================================================
/* Copyright 2013-2018 ARM Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package com.arm.wa.uiauto.antutu; import android.app.Activity; import android.os.Bundle; import android.support.test.runner.AndroidJUnit4; import android.support.test.uiautomator.UiObject; import android.support.test.uiautomator.UiObjectNotFoundException; import android.support.test.uiautomator.UiScrollable; import android.support.test.uiautomator.UiSelector; import android.util.Log; import com.arm.wa.uiauto.BaseUiAutomation; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import java.util.HashSet; import java.util.Set; import java.util.concurrent.TimeUnit; @RunWith(AndroidJUnit4.class) public class UiAutomation extends BaseUiAutomation { public static String TAG = "UXPERF"; public static String TestButton5 = "com.antutu.ABenchMark:id/start_test_region"; public static String TestButton6 = "com.antutu.ABenchMark:id/start_test_text"; private static int initialTimeoutSeconds = 20; protected Bundle parameters; protected String version; @Before public void initialize(){ parameters = getParams(); version = parameters.getString("version"); } @Test public void setup() throws Exception { dismissAndroidVersionPopup(); clearPopups(); } @Test public void runWorkload() throws Exception{ hitTest(); waitforCompletion(); } @Test public void extractResults() throws Exception{ if (version.startsWith("9")){ getScoresv9(); } else if (version.startsWith("8")){ getScoresv8(); } else { getScoresv7(); } } public void hitTest() throws Exception { UiObject testbutton = mDevice.findObject(new UiSelector().resourceId("com.antutu.ABenchMark:id/main_test_start_title")); testbutton.click(); sleep(1); } public void clearPopups() throws Exception { UiObject agreement = mDevice.findObject(new UiSelector().textContains("NEXT")); agreement.waitForExists(5000); if (agreement.exists()){ agreement.click(); } UiObject cancel = mDevice.findObject(new UiSelector().textContains("CANCEL")); cancel.waitForExists(5000); if (cancel.exists()){ cancel.click(); } } public void waitforCompletion() throws Exception { UiObject totalScore = mDevice.findObject(new UiSelector().resourceId("com.antutu.ABenchMark:id/textViewTotalScore")); totalScore.waitForExists(600000); } public void getScoresv7() throws Exception { //Expand, Extract and Close CPU sub scores UiObject cpuscores = mDevice.findObject(new UiSelector().text("CPU")); cpuscores.click(); UiObject cpumaths = mDevice.findObject(new UiSelector().text("CPU Mathematics Score").fromParent(new UiSelector().index(3))); UiObject cpucommon = mDevice.findObject(new UiSelector().text("CPU Common Use Score").fromParent(new UiSelector().index(3))); UiObject cpumulti = mDevice.findObject(new UiSelector().text("CPU Multi-Core Score").fromParent(new UiSelector().index(3))); Log.d(TAG, "CPU Maths Score " + cpumaths.getText()); Log.d(TAG, "CPU Common Score " + cpucommon.getText()); Log.d(TAG, "CPU Multi Score " + cpumulti.getText()); cpuscores.click(); //Expand, Extract and Close GPU sub scores UiObject gpuscores = mDevice.findObject(new UiSelector().text("GPU")); gpuscores.click(); UiObject gpumaroon = mDevice.findObject(new UiSelector().text("3D [Marooned] Score").fromParent(new UiSelector().index(3))); UiObject gpucoast = mDevice.findObject(new UiSelector().text("3D [Coastline] Score").fromParent(new UiSelector().index(3))); UiObject gpurefinery = mDevice.findObject(new UiSelector().text("3D [Refinery] Score").fromParent(new UiSelector().index(3))); Log.d(TAG, "GPU Marooned Score " + gpumaroon.getText()); Log.d(TAG, "GPU Coastline Score " + gpucoast.getText()); 
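        // (These Log.d messages form the contract with the Python side: each
        // line must match one of the regex_matches_v7 patterns defined in
        // antutu/__init__.py for the score to be turned into a metric.)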
Log.d(TAG, "GPU Refinery Score " + gpurefinery.getText()); gpuscores.click(); //Expand, Extract and Close UX sub scores UiObject uxscores = mDevice.findObject(new UiSelector().text("UX")); uxscores.click(); UiObject security = mDevice.findObject(new UiSelector().text("Data Security Score").fromParent(new UiSelector().index(3))); UiObject dataprocessing = mDevice.findObject(new UiSelector().text("Data Processing Score").fromParent(new UiSelector().index(3))); UiObject imageprocessing = mDevice.findObject(new UiSelector().text("Image Processing Score").fromParent(new UiSelector().index(3))); UiObject uxscore = mDevice.findObject(new UiSelector().text("User Experience Score").fromParent(new UiSelector().index(3))); Log.d(TAG, "Data Security Score " + security.getText()); Log.d(TAG, "Data Processing Score " + dataprocessing.getText()); Log.d(TAG, "Image Processing Score " + imageprocessing.getText()); Log.d(TAG, "User Experience Score " + uxscore.getText()); uxscores.click(); //Expand, Extract and Close MEM sub scores UiObject memscores = mDevice.findObject(new UiSelector().text("MEM")); memscores.click(); UiObject ramscore = mDevice.findObject(new UiSelector().text("RAM Score").fromParent(new UiSelector().index(3))); UiObject romscore = mDevice.findObject(new UiSelector().text("ROM Score").fromParent(new UiSelector().index(3))); Log.d(TAG, "RAM Score " + ramscore.getText()); Log.d(TAG, "ROM Score " + romscore.getText()); memscores.click(); } public void getScoresv8() throws Exception { UiScrollable list = new UiScrollable(new UiSelector().scrollable(true)); //Expand, Extract and Close CPU sub scores UiObject cpuscores = mDevice.findObject(new UiSelector().resourceId("com.antutu.ABenchMark:id/result_details_recyclerView")) .getChild(new UiSelector().index(2)) .getChild(new UiSelector().index(4)); cpuscores.click(); UiObject cpumaths = mDevice.findObject(new UiSelector().text("CPU Mathematical Operations").fromParent(new UiSelector().index(1))); UiObject cpucommon = mDevice.findObject(new UiSelector().text("CPU Common Algorithms").fromParent(new UiSelector().index(1))); UiObject cpumulti = mDevice.findObject(new UiSelector().text("CPU Multi-Core").fromParent(new UiSelector().index(1))); Log.d(TAG, "CPU Mathematical Operations Score " + cpumaths.getText()); Log.d(TAG, "CPU Common Algorithms Score " + cpucommon.getText()); Log.d(TAG, "CPU Multi-Core Score " + cpumulti.getText()); cpuscores.click(); //Expand, Extract and Close GPU sub scores UiObject gpuscores = mDevice.findObject(new UiSelector().resourceId("com.antutu.ABenchMark:id/result_details_recyclerView")) .getChild(new UiSelector().index(3)) .getChild(new UiSelector().index(4)); gpuscores.click(); UiObject gputerracotta = mDevice.findObject(new UiSelector().text("Terracotta - Vulkan").fromParent(new UiSelector().index(1))); UiObject gpucoast = mDevice.findObject(new UiSelector().text("Coastline - Vulkan").fromParent(new UiSelector().index(1))); UiObject gpurefinery = mDevice.findObject(new UiSelector().text("Refinery - OpenGL ES3.1+AEP").fromParent(new UiSelector().index(1))); Log.d(TAG, "GPU Terracotta Score " + gputerracotta.getText()); Log.d(TAG, "GPU Coastline Score " + gpucoast.getText()); Log.d(TAG, "GPU Refinery Score " + gpurefinery.getText()); gpuscores.click(); //Expand, Extract and Close UX sub scores UiObject uxscores = mDevice.findObject(new UiSelector().resourceId("com.antutu.ABenchMark:id/result_details_recyclerView")) .getChild(new UiSelector().index(5)) .getChild(new UiSelector().index(4)); uxscores.click(); UiObject 
security = mDevice.findObject(new UiSelector().text("Data Security").fromParent(new UiSelector().index(1))); UiObject dataprocessing = mDevice.findObject(new UiSelector().text("Data Processing").fromParent(new UiSelector().index(1))); UiObject imageprocessing = mDevice.findObject(new UiSelector().text("Image Processing").fromParent(new UiSelector().index(1))); UiObject uxscore = mDevice.findObject(new UiSelector().text("User Experience").fromParent(new UiSelector().index(1))); if (!security.exists() && list.waitForExists(60)) { list.scrollIntoView(security); } Log.d(TAG, "Data Security Score " + security.getText()); if (!dataprocessing.exists() && list.waitForExists(60)) { list.scrollIntoView(dataprocessing); } Log.d(TAG, "Data Processing Score " + dataprocessing.getText()); if (!imageprocessing.exists() && list.waitForExists(60)) { list.scrollIntoView(imageprocessing); } Log.d(TAG, "Image Processing Score " + imageprocessing.getText()); if (!uxscore.exists() && list.waitForExists(60)) { list.scrollIntoView(uxscore); } Log.d(TAG, "User Experience Score " + uxscore.getText()); list.scrollToBeginning(10); uxscores.click(); //Expand, Extract and Close MEM sub scores UiObject memscores = mDevice.findObject(new UiSelector().resourceId("com.antutu.ABenchMark:id/result_details_recyclerView")) .getChild(new UiSelector().index(4)) .getChild(new UiSelector().index(4)); memscores.click(); UiObject ramaccess = mDevice.findObject(new UiSelector().text("RAM Access").fromParent(new UiSelector().index(1))); UiObject romapp = mDevice.findObject(new UiSelector().text("ROM APP IO").fromParent(new UiSelector().index(1))); UiObject romread = mDevice.findObject(new UiSelector().text("ROM Sequential Read").fromParent(new UiSelector().index(1))); UiObject romwrite = mDevice.findObject(new UiSelector().text("ROM Sequential Write").fromParent(new UiSelector().index(1))); UiObject romaccess = mDevice.findObject(new UiSelector().text("ROM Random Access").fromParent(new UiSelector().index(1))); if (!ramaccess.exists() && list.waitForExists(60)) { list.scrollIntoView(ramaccess); } Log.d(TAG, "RAM Access Score " + ramaccess.getText()); if (!romapp.exists() && list.waitForExists(60)) { list.scrollIntoView(romapp); } Log.d(TAG, "ROM APP IO Score " + romapp.getText()); if (!romread.exists() && list.waitForExists(60)) { list.scrollIntoView(romread); } Log.d(TAG, "ROM Sequential Read Score " + romread.getText()); if (!romwrite.exists() && list.waitForExists(60)) { list.scrollIntoView(romwrite); } Log.d(TAG, "ROM Sequential Write Score " + romwrite.getText()); if (!romaccess.exists() && list.waitForExists(60)) { list.scrollIntoView(romaccess); } Log.d(TAG, "ROM Random Access Score " + romaccess.getText()); list.scrollToBeginning(10); memscores.click(); } public void getScoresv9() throws Exception { UiScrollable list = new UiScrollable(new UiSelector().scrollable(true)); //Expand, Extract and Close CPU sub scores UiObject cpuscores = mDevice.findObject(new UiSelector().resourceId("com.antutu.ABenchMark:id/result_details_recyclerView")) .getChild(new UiSelector().index(2)) .getChild(new UiSelector().index(4)); cpuscores.click(); UiObject cpumaths = mDevice.findObject(new UiSelector().text("CPU Mathematical Operations").fromParent(new UiSelector().index(1))); UiObject cpucommon = mDevice.findObject(new UiSelector().text("CPU Common Algorithms").fromParent(new UiSelector().index(1))); UiObject cpumulti = mDevice.findObject(new UiSelector().text("CPU Multi-Core").fromParent(new UiSelector().index(1))); Log.d(TAG, "CPU Mathematical 
Operations Score " + cpumaths.getText()); Log.d(TAG, "CPU Common Algorithms Score " + cpucommon.getText()); Log.d(TAG, "CPU Multi-Core Score " + cpumulti.getText()); cpuscores.click(); //Expand, Extract and Close GPU sub scores UiObject gpuscores = mDevice.findObject(new UiSelector().resourceId("com.antutu.ABenchMark:id/result_details_recyclerView")) .getChild(new UiSelector().index(3)) .getChild(new UiSelector().index(4)); gpuscores.click(); UiObject gputerracotta = mDevice.findObject(new UiSelector().text("Terracotta - Vulkan").fromParent(new UiSelector().index(1))); UiObject gpuswordsman = mDevice.findObject(new UiSelector().text("Swordsman - Vulkan").fromParent(new UiSelector().index(1))); UiObject gpurefinery = mDevice.findObject(new UiSelector().text("Refinery - OpenGL ES3.1+AEP").fromParent(new UiSelector().index(1))); Log.d(TAG, "GPU Terracotta Score " + gputerracotta.getText()); Log.d(TAG, "GPU Swordsman Score " + gpuswordsman.getText()); Log.d(TAG, "GPU Refinery Score " + gpurefinery.getText()); gpuscores.click(); //Expand, Extract and Close UX sub scores UiObject uxscores = mDevice.findObject(new UiSelector().resourceId("com.antutu.ABenchMark:id/result_details_recyclerView")) .getChild(new UiSelector().index(5)) .getChild(new UiSelector().index(4)); uxscores.click(); UiObject security = mDevice.findObject(new UiSelector().text("Data Security").fromParent(new UiSelector().index(1))); UiObject dataprocessing = mDevice.findObject(new UiSelector().text("Data Processing").fromParent(new UiSelector().index(1))); UiObject imageprocessing = mDevice.findObject(new UiSelector().text("Image Processing").fromParent(new UiSelector().index(1))); UiObject uxscore = mDevice.findObject(new UiSelector().text("User Experience").fromParent(new UiSelector().index(1))); UiObject videocts = mDevice.findObject(new UiSelector().text("Video CTS").fromParent(new UiSelector().index(1))); UiObject videodecode = mDevice.findObject(new UiSelector().text("Video Decode").fromParent(new UiSelector().index(1))); if (!security.exists() && list.waitForExists(60)) { list.scrollIntoView(security); } Log.d(TAG, "Data Security Score " + security.getText()); if (!dataprocessing.exists() && list.waitForExists(60)) { list.scrollIntoView(dataprocessing); } Log.d(TAG, "Data Processing Score " + dataprocessing.getText()); if (!imageprocessing.exists() && list.waitForExists(60)) { list.scrollIntoView(imageprocessing); } Log.d(TAG, "Image Processing Score " + imageprocessing.getText()); if (!uxscore.exists() && list.waitForExists(60)) { list.scrollIntoView(uxscore); } Log.d(TAG, "User Experience Score " + uxscore.getText()); if (!videocts.exists() && list.waitForExists(60)) { list.scrollIntoView(videocts); } Log.d(TAG, "Video CTS Score " + videocts.getText()); if (!videodecode.exists() && list.waitForExists(60)) { list.scrollIntoView(videodecode); } Log.d(TAG, "Video Decode Score " + videodecode.getText()); list.scrollToBeginning(10); uxscores.click(); //Expand, Extract and Close MEM sub scores UiObject memscores = mDevice.findObject(new UiSelector().resourceId("com.antutu.ABenchMark:id/result_details_recyclerView")) .getChild(new UiSelector().index(4)) .getChild(new UiSelector().index(4)); memscores.click(); UiObject ramaccess = mDevice.findObject(new UiSelector().text("RAM Access").fromParent(new UiSelector().index(1))); UiObject romapp = mDevice.findObject(new UiSelector().text("ROM APP IO").fromParent(new UiSelector().index(1))); UiObject romread = mDevice.findObject(new UiSelector().text("ROM Sequential 
Read").fromParent(new UiSelector().index(1))); UiObject romwrite = mDevice.findObject(new UiSelector().text("ROM Sequential Write").fromParent(new UiSelector().index(1))); UiObject romaccess = mDevice.findObject(new UiSelector().text("ROM Random Access").fromParent(new UiSelector().index(1))); if (!ramaccess.exists() && list.waitForExists(60)) { list.scrollIntoView(ramaccess); } Log.d(TAG, "RAM Access Score " + ramaccess.getText()); if (!romapp.exists() && list.waitForExists(60)) { list.scrollIntoView(romapp); } Log.d(TAG, "ROM APP IO Score " + romapp.getText()); if (!romread.exists() && list.waitForExists(60)) { list.scrollIntoView(romread); } Log.d(TAG, "ROM Sequential Read Score " + romread.getText()); if (!romwrite.exists() && list.waitForExists(60)) { list.scrollIntoView(romwrite); } Log.d(TAG, "ROM Sequential Write Score " + romwrite.getText()); if (!romaccess.exists() && list.waitForExists(60)) { list.scrollIntoView(romaccess); } Log.d(TAG, "ROM Random Access Score " + romaccess.getText()); list.scrollToBeginning(10); memscores.click(); } } ================================================ FILE: wa/workloads/antutu/uiauto/build.gradle ================================================ // Top-level build file where you can add configuration options common to all sub-projects/modules. buildscript { repositories { jcenter() google() } dependencies { classpath 'com.android.tools.build:gradle:7.2.1' // NOTE: Do not place your application dependencies here; they belong // in the individual module build.gradle files } } allprojects { repositories { jcenter() google() } } task clean(type: Delete) { delete rootProject.buildDir } ================================================ FILE: wa/workloads/antutu/uiauto/build.sh ================================================ #!/bin/bash # Copyright 2018 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # CD into build dir if possible - allows building from any directory script_path='.' if `readlink -f $0 &>/dev/null`; then script_path=`readlink -f $0 2>/dev/null` fi script_dir=`dirname $script_path` cd $script_dir # Ensure gradelw exists before starting if [[ ! -f gradlew ]]; then echo 'gradlew file not found! Check that you are in the right directory.' exit 9 fi # Copy base class library from wa dist libs_dir=app/libs base_class=`python3 -c "import os, wa; print(os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar'))"` mkdir -p $libs_dir cp $base_class $libs_dir # Build and return appropriate exit code if failed # gradle build ./gradlew clean :app:assembleDebug exit_code=$? if [[ $exit_code -ne 0 ]]; then echo "ERROR: 'gradle build' exited with code $exit_code" exit $exit_code fi # If successful move APK file to workload folder (overwrite previous) package=com.arm.wa.uiauto.antutu rm -f ../$package if [[ -f app/build/outputs/apk/debug/$package.apk ]]; then cp app/build/outputs/apk/debug/$package.apk ../$package.apk else echo 'ERROR: UiAutomator apk could not be found!' 
exit 9 fi ================================================ FILE: wa/workloads/antutu/uiauto/gradle/wrapper/gradle-wrapper.properties ================================================ #Thu Jun 08 14:26:39 BST 2017 distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip ================================================ FILE: wa/workloads/antutu/uiauto/gradlew ================================================ #!/usr/bin/env bash ############################################################################## ## ## Gradle start up script for UN*X ## ############################################################################## # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS="" APP_NAME="Gradle" APP_BASE_NAME=`basename "$0"` # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD="maximum" warn ( ) { echo "$*" } die ( ) { echo echo "$*" echo exit 1 } # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false case "`uname`" in CYGWIN* ) cygwin=true ;; Darwin* ) darwin=true ;; MINGW* ) msys=true ;; esac # Attempt to set APP_HOME # Resolve links: $0 may be a link PRG="$0" # Need this for relative symlinks. while [ -h "$PRG" ] ; do ls=`ls -ld "$PRG"` link=`expr "$ls" : '.*-> \(.*\)$'` if expr "$link" : '/.*' > /dev/null; then PRG="$link" else PRG=`dirname "$PRG"`"/$link" fi done SAVED="`pwd`" cd "`dirname \"$PRG\"`/" >/dev/null APP_HOME="`pwd -P`" cd "$SAVED" >/dev/null CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar # Determine the Java command to use to start the JVM. if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables JAVACMD="$JAVA_HOME/jre/sh/java" else JAVACMD="$JAVA_HOME/bin/java" fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else JAVACMD="java" which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi # Increase the maximum file descriptors if we can. if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then MAX_FD_LIMIT=`ulimit -H -n` if [ $? -eq 0 ] ; then if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then MAX_FD="$MAX_FD_LIMIT" fi ulimit -n $MAX_FD if [ $? 
-ne 0 ] ; then warn "Could not set maximum file descriptor limit: $MAX_FD" fi else warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" fi fi # For Darwin, add options to specify how the application appears in the dock if $darwin; then GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" fi # For Cygwin, switch paths to Windows format before running java if $cygwin ; then APP_HOME=`cygpath --path --mixed "$APP_HOME"` CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` JAVACMD=`cygpath --unix "$JAVACMD"` # We build the pattern for arguments to be converted via cygpath ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` SEP="" for dir in $ROOTDIRSRAW ; do ROOTDIRS="$ROOTDIRS$SEP$dir" SEP="|" done OURCYGPATTERN="(^($ROOTDIRS))" # Add a user-defined pattern to the cygpath arguments if [ "$GRADLE_CYGPATTERN" != "" ] ; then OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" fi # Now convert the arguments - kludge to limit ourselves to /bin/sh i=0 for arg in "$@" ; do CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` else eval `echo args$i`="\"$arg\"" fi i=$((i+1)) done case $i in (0) set -- ;; (1) set -- "$args0" ;; (2) set -- "$args0" "$args1" ;; (3) set -- "$args0" "$args1" "$args2" ;; (4) set -- "$args0" "$args1" "$args2" "$args3" ;; (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; esac fi # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules function splitJvmOpts() { JVM_OPTS=("$@") } eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" ================================================ FILE: wa/workloads/antutu/uiauto/gradlew.bat ================================================ @if "%DEBUG%" == "" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @rem @rem ########################################################################## @rem Set local scope for the variables with windows NT shell if "%OS%"=="Windows_NT" setlocal @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. set DEFAULT_JVM_OPTS= set DIRNAME=%~dp0 if "%DIRNAME%" == "" set DIRNAME=. set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 if "%ERRORLEVEL%" == "0" goto init echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :findJavaFromJavaHome set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe if exist "%JAVA_EXE%" goto init echo. 
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail

:init
@rem Get command-line arguments, handling Windows variants

if not "%OS%" == "Windows_NT" goto win9xME_args
if "%@eval[2+2]" == "4" goto 4NT_args

:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2

:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
goto execute

:4NT_args
@rem Get arguments from the 4NT Shell from JP Software
set CMD_LINE_ARGS=%$

:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar

@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%

:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd

:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1

:mainEnd
if "%OS%"=="Windows_NT" endlocal

:omega

================================================ FILE: wa/workloads/antutu/uiauto/settings.gradle ================================================
include ':app'

================================================ FILE: wa/workloads/apache.py ================================================
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division

import os

# pylint: disable=wrong-import-order,wrong-import-position
from future.standard_library import install_aliases
install_aliases()

from urllib.request import urlopen  # pylint: disable=import-error

from wa import Workload, Parameter, Alias, WorkloadError
from wa.utils.exec_control import once
from wa.utils.misc import which, check_output


class ApacheBenchmark(Workload):

    name = 'apache'
    description = '''
    Load-test an Apache installation by issuing parallel requests with ab.

    Run ``ab``, the Apache benchmark, on the host, directed at the target as
    the server.

    .. note:: It is assumed that Apache is already running on the target.

    .. note:: The current implementation only supports a very basic use of
              the benchmark.

    '''

    parameters = [
        Parameter('port', kind=int, default=80,
                  description='''
                  Port on which Apache is running.
                  '''),
        Parameter('path', default='/',
                  description='''
                  Path to request.
                  '''),
        Parameter('parallel_requests', kind=int, default=350,
                  description='''
                  The number of parallel requests at a time.
                  '''),
        Parameter('total_requests', kind=int, default=100000,
                  description='''
                  The total number of requests to make.
                  '''),
    ]

    aliases = [
        Alias('ab'),
    ]

    supported_targets = ['linux']

    @once
    def initialize(self, context):
        ab = which('ab')
        if not ab:
            msg = 'ab not found on host; make sure apache2-utils (or your distro equivalent) package is installed.'
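            # 'ab' ships in the apache2-utils package on Debian/Ubuntu and in
            # httpd-tools on Fedora/RHEL.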
raise WorkloadError(msg) response = urlopen('http://{}:{}{}'.format(self.target.conn.host, self.port, self.path)) code = response.getcode() if code != 200: msg = 'HTTP request failed with status {}; is Apache running on target?' raise WorkloadError(msg.format(code)) def setup(self, context): template = 'ab -k -c {} -n {} {}:{}{}' self.command = template.format(self.parallel_requests, self.total_requests, self.target.conn.host, self.port, self.path) self.output = None def run(self, context): self.logger.debug(self.command) self.output, _ = check_output(self.command, timeout=300, shell=True) def extract_results(self, context): outfile = os.path.join(context.output_directory, 'ab.output') with open(outfile, 'w') as wfh: wfh.write(self.output) context.add_artifact('ab-output', outfile, kind='raw') def update_output(self, context): # pylint: disable=too-many-locals with open(context.get_artifact_path('ab-output')) as fh: server_software = get_line(fh, 'Server Software').split(':')[1].strip() context.add_metadata('server-software', server_software) doc_len_str = get_line(fh, 'Document Length').split(':')[1].strip() doc_len = int(doc_len_str.split()[0]) context.add_metadata('document-length', doc_len) completed = int(get_line(fh, 'Complete requests').split(':')[1].strip()) failed = int(get_line(fh, 'Failed requests').split(':')[1].strip()) fail_rate = failed / completed * 100 context.add_metric('failed_request', fail_rate, units='percent', lower_is_better=True) rps_str = get_line(fh, 'Requests per second').split(':')[1].strip() rps = float(rps_str.split('[')[0]) rps_units = rps_str.split('[')[1].split(']')[0] context.add_metric('requests_per_second', rps, units=rps_units) tpr_str = get_line(fh, 'Time per request').split(':')[1].strip() tpr = float(tpr_str.split('[')[0]) tpr_units = tpr_str.split('[')[1].split(']')[0] context.add_metric('time_per_request', tpr, units=tpr_units) trate_str = get_line(fh, 'Transfer rate').split(':')[1].strip() trate = float(trate_str.split('[')[0]) trate_units = trate_str.split('[')[1].split(']')[0] context.add_metric('transfer_rate', trate, units=trate_units) pc99 = int(get_line(fh, '99%').split()[1]) context.add_metric('request_99percentile', pc99, 'ms') pc100 = int(get_line(fh, '100%').split()[1]) context.add_metric('longest_request', pc100, 'ms') def get_line(fh, text): for line in fh: if text in line: return line ================================================ FILE: wa/workloads/applaunch/__init__.py ================================================ # Copyright 2015-2017 ARM Limited # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=attribute-defined-outside-init from wa import ApkUiautoWorkload, Parameter from wa.framework import pluginloader class Applaunch(ApkUiautoWorkload): name = 'applaunch' description = ''' This workload launches and measures the launch time of applications for supporting workloads. Currently supported workloads are the ones that implement ``ApplaunchInterface``. 
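    The launch time of each iteration is reported through UXPERF action
    markers in the logcat (see the Test Description below). As a rough,
    illustrative host-side sketch of turning such markers into per-iteration
    launch times; the exact marker line format is an assumption here and
    should be checked against the uxperf output processor of your WA
    version::

        import re

        # Assumed marker format: "UX_PERF : applaunch<N> <start|end> <timestamp-ns>"
        MARKER = re.compile(r'UX_PERF\s*:\s*(applaunch\d+)\s+(start|end)\s+(\d+)')

        def launch_times_ms(logcat_lines):
            starts, times = {}, {}
            for line in logcat_lines:
                m = MARKER.search(line)
                if not m:
                    continue
                tag, edge, ts = m.group(1), m.group(2), int(m.group(3))
                if edge == 'start':
                    starts[tag] = ts
                elif tag in starts:
                    times[tag] = (ts - starts[tag]) / 1e6  # ns -> ms
            return times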
    To be supported, a workload must implement ``ApplaunchInterface``; the
    corresponding Java file of the workload associated with the measured
    application is executed during the run. The application to be measured is
    passed via the parameter ``workload_name``, and the parameters required
    for that workload are passed as a dictionary via the parameter
    ``workload_params``. This information can be obtained by inspecting the
    details of the specific workload.

    The workload allows running multiple iterations of an application launch
    in two modes:

    1. Launch from background
    2. Launch from long-idle

    These modes are captured as the parameter ``applaunch_type``.

    ``launch_from_background``
        Launches an application after the application is sent to the
        background by pressing the Home button.

    ``launch_from_long-idle``
        Launches an application after killing the application process and
        clearing all the caches.

    **Test Description:**

    - During initialization and setup, the application being launched is
      launched for the first time. The uiauto APK of the workload associated
      with the application is deployed to the device location ``workdir``;
      it implements the methods needed to measure the application launch
      time.

    - The run phase calls the applaunch UiAutomator, which runs in two
      subphases:

      A. Applaunch Setup Run: welcome screens and dialogues shown on the
         first launch of the instrumented application are cleared.

      B. Applaunch Metric Run: the application is launched repeatedly, the
         number of launches being determined by the parameter
         ``applaunch_iterations``. Each iteration is instrumented to capture
         the launch time taken, and the values are recorded as UXPERF marker
         values in the logfile.
    '''

    supported_platforms = ['android']

    parameters = [
        Parameter('workload_name', kind=str,
                  description='Name of the uxperf workload to launch',
                  default='gmail'),
        Parameter('workload_params', kind=dict, default={},
                  description="""
                  Parameters of the uxperf workload whose application launch
                  time is measured.
                  """),
        Parameter('applaunch_type', kind=str, default='launch_from_background',
                  allowed_values=['launch_from_background', 'launch_from_long-idle'],
                  description="""
                  Choose launch_from_long-idle for measuring launch time
                  from long-idle. These two types are described in the
                  workload description.
"""), Parameter('applaunch_iterations', kind=int, default=1, description=""" Number of iterations of the application launch """), ] def init_resources(self, context): super(Applaunch, self).init_resources(context) self.workload_params['markers_enabled'] = True # pylint: disable=no-member self.workload = pluginloader.get_workload(self.workload_name, self.target, **self.workload_params) self.workload.init_resources(context) self.workload.initialize(context) self.package_names = self.workload.package_names self.pass_parameters() # Deploy test workload uiauto apk self.asset_files.append(self.workload.gui.uiauto_file) def pass_parameters(self): self.gui.uiauto_params['workload'] = self.workload.name self.gui.uiauto_params['package_name'] = self.workload.package self.gui.uiauto_params.update(self.workload.gui.uiauto_params) if self.workload.apk.activity: self.gui.uiauto_params['launch_activity'] = self.workload.apk.activity else: self.gui.uiauto_params['launch_activity'] = "None" self.gui.uiauto_params['applaunch_type'] = self.applaunch_type self.gui.uiauto_params['applaunch_iterations'] = self.applaunch_iterations def setup(self, context): self.workload.gui.uiauto_params['package_name'] = self.workload.apk.apk_info.package self.workload.gui.init_commands() self.workload.gui.deploy() super(Applaunch, self).setup(context) def finalize(self, context): super(Applaunch, self).finalize(context) self.workload.finalize(context) ================================================ FILE: wa/workloads/applaunch/uiauto/app/build.gradle ================================================ apply plugin: 'com.android.application' def packageName = "com.arm.wa.uiauto.applaunch" android { compileSdkVersion 28 buildToolsVersion "28.0.3" defaultConfig { applicationId "${packageName}" minSdkVersion 18 targetSdkVersion 28 testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner" } buildTypes { applicationVariants.all { variant -> variant.outputs.each { output -> output.outputFileName = "${packageName}.apk" } } } } dependencies { implementation fileTree(dir: 'libs', include: ['*.jar']) implementation 'com.android.support.test:runner:0.5' implementation 'com.android.support.test:rules:0.5' implementation 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2' implementation(name: 'uiauto', ext:'aar') } repositories { flatDir { dirs 'libs' } } ================================================ FILE: wa/workloads/applaunch/uiauto/app/src/main/AndroidManifest.xml ================================================ ================================================ FILE: wa/workloads/applaunch/uiauto/app/src/main/java/com/arm/wa/uiauto/applaunch/UiAutomation.java ================================================ /* Copyright 2014-2018 ARM Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 */

package com.arm.wa.uiauto.applaunch;

import android.os.Bundle;
import android.support.test.runner.AndroidJUnit4;
import android.support.test.uiautomator.UiObject;
import android.util.Log;

import com.arm.wa.uiauto.ApplaunchInterface;
import com.arm.wa.uiauto.BaseUiAutomation;
import com.arm.wa.uiauto.UxPerfUiAutomation;
import com.arm.wa.uiauto.ActionLogger;

import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;

import java.io.File;

import dalvik.system.DexClassLoader;

@RunWith(AndroidJUnit4.class)
public class UiAutomation extends BaseUiAutomation {

    /**
     * UiObject that marks the end of launch of an application, which is workload
     * specific and added in the workload Java file by a method called getLaunchEndObject().
     */
    public UiObject launchEndObject;

    /** Timeout to wait for application launch to finish. */
    private Integer launch_timeout = 10;

    public String applaunchType;
    public int applaunchIterations;
    public String activityName;
    public ApplaunchInterface launch_workload;

    protected Bundle parameters;
    protected String packageName;
    protected String packageID;

    @Before
    public void initialize() throws Exception {
        parameters = getParams();
        packageID = getPackageID(parameters);

        // Get workload apk file parameters; assign the packageName field
        // (rather than shadowing it with a local) so that killApplication()
        // can use it later.
        packageName = parameters.getString("package_name");
        String workload = parameters.getString("workload");
        String workloadAPKPath = parameters.getString("workdir");
        String workloadName = String.format("com.arm.wa.uiauto.%1s.apk", workload);
        String workloadAPKFile = String.format("%1s/%2s", workloadAPKPath, workloadName);

        // Load the apk file
        File apkFile = new File(workloadAPKFile);
        File dexLocation = mContext.getDir("outdex", 0);
        if (!apkFile.exists()) {
            throw new Exception(String.format("APK file not found: %s ", workloadAPKFile));
        }
        DexClassLoader classloader = new DexClassLoader(apkFile.toURI().toURL().toString(),
                dexLocation.getAbsolutePath(), null, mContext.getClassLoader());

        Class uiautomation = null;
        Object uiautomation_interface = null;
        String workloadClass = String.format("com.arm.wa.uiauto.%1s.UiAutomation", workload);
        try {
            uiautomation = classloader.loadClass(workloadClass);
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        }
        Log.d("Class loaded:", uiautomation.getCanonicalName());
        uiautomation_interface = uiautomation.newInstance();

        // Create an Application Interface object from the workload
        launch_workload = ((ApplaunchInterface) uiautomation_interface);
        launch_workload.initialize_instrumentation();
        launch_workload.setWorkloadParameters(parameters);

        // Get parameters for application launch
        applaunchType = parameters.getString("applaunch_type");
        applaunchIterations = parameters.getInt("applaunch_iterations");
        activityName = parameters.getString("launch_activity");
    }

    /**
     * Setup run for applaunch workload that clears the initial
     * run dialogues on launching an application package.
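     * The dismissal itself is delegated to the instrumented workload's own
     * runApplicationSetup() implementation, obtained through the
     * ApplaunchInterface loaded in initialize() above.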
*/ @Test public void setup() throws Exception { mDevice.setOrientationNatural(); launch_workload.runApplicationSetup(); unsetScreenOrientation(); closeApplication(); } @Test public void runWorkload() throws Exception { launchEndObject = launch_workload.getLaunchEndObject(); for (int iteration = 0; iteration < applaunchIterations; iteration++) { Log.d("Applaunch iteration number: ", String.valueOf(applaunchIterations)); sleep(20);//sleep for a while before next iteration killBackground(); runApplaunchIteration(iteration); closeApplication(); } } @Test public void teardown() throws Exception { mDevice.unfreezeRotation(); } /** * This method performs multiple iterations of application launch and * records the time taken for each iteration. */ public void runApplaunchIteration(Integer iteration_count) throws Exception{ String testTag = "applaunch" + iteration_count; String launchCommand = launch_workload.getLaunchCommand(); AppLaunch applaunch = new AppLaunch(testTag, launchCommand); applaunch.startLaunch(); // Launch the application and start timer applaunch.endLaunch(); // marks the end of launch and stops timer } /* * AppLaunch class implements methods that facilitates launching applications * from the uiautomator. It has methods that are used for one complete iteration of application * launch instrumentation. * ActionLogger class is instantiated within the class for measuring applaunch time. * startLaunch(): Marks the beginning of the application launch, starts Timer * endLaunch(): Marks the end of application, ends Timer * launchMain(): Starts the application launch process and validates the finish of launch. */ private class AppLaunch { private String testTag; private String launchCommand; private ActionLogger logger; Process launch_p; public AppLaunch(String testTag, String launchCommand) { this.testTag = testTag; this.launchCommand = launchCommand; this.logger = new ActionLogger(testTag, parameters); } // Beginning of application launch public void startLaunch() throws Exception{ logger.start(); launchMain(); } // Launches the application. public void launchMain() throws Exception{ launch_p = Runtime.getRuntime().exec(launchCommand); launchValidate(launch_p); } // Called by launchMain() to check if app launch is successful public void launchValidate(Process launch_p) throws Exception { launch_p.waitFor(); Integer exit_val = launch_p.exitValue(); if (exit_val != 0) { throw new Exception("Application could not be launched"); } } // Marks the end of application launch of the workload. public void endLaunch() throws Exception{ waitObject(launchEndObject, launch_timeout); logger.stop(); launch_p.destroy(); } } // Exits the application according to application launch type. 
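    // launch_from_background simply presses Home, leaving the process cached;
    // launch_from_long-idle force-stops the process and drops the page caches,
    // both executed through 'su' and therefore requiring a rooted device.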
    public void closeApplication() throws Exception {
        if (applaunchType.equals("launch_from_background")) {
            pressHome();
        } else if (applaunchType.equals("launch_from_long-idle")) {
            killApplication();
            dropCaches();
        }
    }

    // Kills the application process
    public void killApplication() throws Exception {
        Process kill_p;
        String command = String.format("am force-stop %s", packageName);
        kill_p = Runtime.getRuntime().exec(new String[] { "su", "-c", command });
        kill_p.waitFor();
        kill_p.destroy();
    }

    // Kills the background processes
    public void killBackground() throws Exception {
        Process kill_p;
        kill_p = Runtime.getRuntime().exec("am kill-all");
        kill_p.waitFor();
        kill_p.destroy();
    }

    // Drop the caches
    public void dropCaches() throws Exception {
        Process sync;
        sync = Runtime.getRuntime().exec(new String[] { "su", "-c", "sync" });
        sync.waitFor();
        sync.destroy();
        Process drop_cache;
        String command = "echo 3 > /proc/sys/vm/drop_caches";
        drop_cache = Runtime.getRuntime().exec(new String[] { "su", "-c", command });
        drop_cache.waitFor();
        drop_cache.destroy();
    }
}

================================================ FILE: wa/workloads/applaunch/uiauto/build.gradle ================================================
// Top-level build file where you can add configuration options common to all sub-projects/modules.
buildscript {
    repositories {
        jcenter()
        google()
    }
    dependencies {
        classpath 'com.android.tools.build:gradle:7.2.1'
        // NOTE: Do not place your application dependencies here; they belong
        // in the individual module build.gradle files
    }
}

allprojects {
    repositories {
        jcenter()
        google()
    }
}

task clean(type: Delete) {
    delete rootProject.buildDir
}

================================================ FILE: wa/workloads/applaunch/uiauto/build.sh ================================================
#!/bin/bash
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# CD into build dir if possible - allows building from any directory
script_path='.'
if `readlink -f $0 &>/dev/null`; then
    script_path=`readlink -f $0 2>/dev/null`
fi
script_dir=`dirname $script_path`
cd $script_dir

# Ensure gradlew exists before starting
if [[ ! -f gradlew ]]; then
    echo 'gradlew file not found! Check that you are in the right directory.'
    exit 9
fi

# Copy base class library from wa dist
libs_dir=app/libs
base_class=`python3 -c "import os, wa; print(os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar'))"`
mkdir -p $libs_dir
cp $base_class $libs_dir

# Build and return appropriate exit code if failed
# gradle build
./gradlew clean :app:assembleDebug
exit_code=$?
if [[ $exit_code -ne 0 ]]; then
    echo "ERROR: 'gradle build' exited with code $exit_code"
    exit $exit_code
fi

# If successful move APK file to workload folder (overwrite previous)
package=com.arm.wa.uiauto.applaunch
rm -f ../$package.apk
if [[ -f app/build/outputs/apk/debug/$package.apk ]]; then
    cp app/build/outputs/apk/debug/$package.apk ../$package.apk
else
    echo 'ERROR: UiAutomator apk could not be found!'
    exit 9
fi

================================================ FILE: wa/workloads/applaunch/uiauto/gradle/wrapper/gradle-wrapper.properties ================================================
#Thu Jun 08 14:21:45 BST 2017
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip

================================================ FILE: wa/workloads/applaunch/uiauto/gradlew ================================================
[Standard Gradle wrapper start-up script for UN*X; identical to wa/workloads/antutu/uiauto/gradlew above.]

================================================ FILE: wa/workloads/applaunch/uiauto/gradlew.bat ================================================
[Standard Gradle wrapper start-up script for Windows; identical to wa/workloads/antutu/uiauto/gradlew.bat above.]

================================================ FILE: wa/workloads/applaunch/uiauto/settings.gradle ================================================
include ':app'

================================================ FILE: wa/workloads/benchmarkpi/__init__.py ================================================
# Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import re

from wa import ApkUiautoWorkload


class BenchmarkPi(ApkUiautoWorkload):

    name = 'benchmarkpi'
    description = """
    Measures the time the target device takes to run and complete the Pi
    calculation algorithm.

    http://androidbenchmark.com/howitworks.php

    From the website:

    The whole idea behind this application is to use the same Pi calculation
    algorithm on every Android Device and check how fast that process is.
    Better calculation times, conclude to faster Android devices. This way you
    can also check how lightweight your custom made Android build is. Or not.

    As Pi is an irrational number, Benchmark Pi does not calculate the actual
    Pi number, but an approximation near the first digits of Pi over the same
    calculation circles the algorithms needs.

    So, the number you are getting in milliseconds is the time your mobile
    device takes to run and complete the Pi calculation algorithm resulting in
    an approximation of the first Pi digits.
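
    The score is extracted from the logcat output. A minimal sketch of the
    extraction this workload performs (the logcat line prefix below is
    illustrative)::

        import re

        regex = re.compile('You calculated Pi in ([0-9]+)')
        line = 'I/benchmarkpi( 1234): You calculated Pi in 4863'
        match = regex.search(line)
        if match:
            print(int(match.group(1)))  # reported as 'pi calculation', in milliseconds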
""" package_names = ['gr.androiddev.BenchmarkPi'] activity = '.BenchmarkPi' regex = re.compile('You calculated Pi in ([0-9]+)') def update_output(self, context): super(BenchmarkPi, self).update_output(context) logcat_file = context.get_artifact_path('logcat') with open(logcat_file, errors='replace') as fh: for line in fh: match = self.regex.search(line) if match: result = int(match.group(1)) if result is not None: context.add_metric('pi calculation', result, 'milliseconds', lower_is_better=True) ================================================ FILE: wa/workloads/benchmarkpi/uiauto/app/build.gradle ================================================ apply plugin: 'com.android.application' def packageName = "com.arm.wa.uiauto.benchmarkpi" android { compileSdkVersion 28 buildToolsVersion "28.0.3" defaultConfig { applicationId "${packageName}" minSdkVersion 18 targetSdkVersion 28 testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner" } buildTypes { applicationVariants.all { variant -> variant.outputs.each { output -> output.outputFileName = "${packageName}.apk" } } } } dependencies { implementation fileTree(dir: 'libs', include: ['*.jar']) implementation 'com.android.support.test:runner:0.5' implementation 'com.android.support.test:rules:0.5' implementation 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2' implementation(name: 'uiauto', ext:'aar') } repositories { flatDir { dirs 'libs' } } ================================================ FILE: wa/workloads/benchmarkpi/uiauto/app/src/main/AndroidManifest.xml ================================================ ================================================ FILE: wa/workloads/benchmarkpi/uiauto/app/src/main/java/com/arm/wa/uiauto/benchmarkpi/UiAutomation.java ================================================ /* Copyright 2013-2017 ARM Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package com.arm.wa.uiauto.benchmarkpi; import android.app.Activity; import android.os.Bundle; import android.support.test.runner.AndroidJUnit4; import android.support.test.uiautomator.UiObject; import android.support.test.uiautomator.UiSelector; import android.util.Log; import com.arm.wa.uiauto.BaseUiAutomation; import org.junit.Test; import org.junit.runner.RunWith; @RunWith(AndroidJUnit4.class) public class UiAutomation extends BaseUiAutomation { public static String TAG = "benchmarkpi"; public Bundle parameters; public String packageID; @Test public void setup() throws Exception { dismissAndroidVersionPopup(); } @Test public void runWorkload() throws Exception { startTest(); waitForResults(); } @Test public void extractResults() throws Exception { UiSelector selector = new UiSelector(); UiObject resultsText = mDevice.findObject(selector.textContains("You calculated Pi in") .className("android.widget.TextView")); Log.v(TAG, resultsText.getText()); } public void startTest() throws Exception{ UiSelector selector = new UiSelector(); UiObject benchButton = mDevice.findObject(selector.text("Benchmark my Android!") .className("android.widget.Button")); benchButton.click(); } public void waitForResults() throws Exception{ UiSelector selector = new UiSelector(); UiObject submitButton = mDevice.findObject(selector.text("Submit") .className("android.widget.Button")); submitButton.waitForExists(10 * 1000); } } ================================================ FILE: wa/workloads/benchmarkpi/uiauto/build.gradle ================================================ // Top-level build file where you can add configuration options common to all sub-projects/modules. buildscript { repositories { jcenter() google() } dependencies { classpath 'com.android.tools.build:gradle:7.2.1' // NOTE: Do not place your application dependencies here; they belong // in the individual module build.gradle files } } allprojects { repositories { jcenter() google() } } task clean(type: Delete) { delete rootProject.buildDir } ================================================ FILE: wa/workloads/benchmarkpi/uiauto/build.sh ================================================ #!/bin/bash # Copyright 2013-2017 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # set -e # CD into build dir if possible - allows building from any directory script_path='.' if `readlink -f $0 &>/dev/null`; then script_path=`readlink -f $0 2>/dev/null` fi script_dir=`dirname $script_path` cd $script_dir # Ensure gradelw exists before starting if [[ ! -f gradlew ]]; then echo 'gradlew file not found! Check that you are in the right directory.' exit 9 fi # Copy base class library from wa dist libs_dir=app/libs base_class=`python3 -c "import os, wa; print(os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar'))"` mkdir -p $libs_dir cp $base_class $libs_dir # Build and return appropriate exit code if failed # gradle build ./gradlew clean :app:assembleDebug exit_code=$? 
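# A non-zero exit code from gradlew means the build failed; propagate it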
if [[ $exit_code -ne 0 ]]; then
    echo "ERROR: 'gradle build' exited with code $exit_code"
    exit $exit_code
fi

# If successful move APK file to workload folder (overwrite previous)
package=com.arm.wa.uiauto.benchmarkpi
rm -f ../$package.apk
if [[ -f app/build/outputs/apk/debug/$package.apk ]]; then
    cp app/build/outputs/apk/debug/$package.apk ../$package.apk
else
    echo 'ERROR: UiAutomator apk could not be found!'
    exit 9
fi

================================================ FILE: wa/workloads/benchmarkpi/uiauto/gradle/wrapper/gradle-wrapper.properties ================================================
#Wed May 03 15:42:44 BST 2017
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip

================================================ FILE: wa/workloads/benchmarkpi/uiauto/gradlew ================================================
[Standard Gradle wrapper start-up script for UN*X; identical to wa/workloads/antutu/uiauto/gradlew above.]

================================================ FILE: wa/workloads/benchmarkpi/uiauto/gradlew.bat ================================================
[Standard Gradle wrapper start-up script for Windows; identical to wa/workloads/antutu/uiauto/gradlew.bat above.]

================================================ FILE: wa/workloads/benchmarkpi/uiauto/settings.gradle ================================================
include ':app'

================================================ FILE: wa/workloads/chrome/__init__.py ================================================
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from wa import Parameter, ApkUiautoWorkload
from wa.framework.exception import WorkloadError


class Chrome(ApkUiautoWorkload):

    name = 'chrome'
    description = '''
    A workload to perform standard Web browsing tasks with Google Chrome. The
    workload carries out a number of typical Web-based tasks, navigating
    through a handful of Wikipedia pages in multiple browser tabs.

    To run the workload in offline mode, a ``pages.tar`` archive and an
    ``OfflinePages.db`` file are required. For users wishing to generate these
    files themselves, Chrome should first be operated from an
    Internet-connected environment and the following Wikipedia pages should be
    downloaded for offline use within Chrome:

    - https://en.m.wikipedia.org/wiki/Main_Page
    - https://en.m.wikipedia.org/wiki/United_States
    - https://en.m.wikipedia.org/wiki/California

    Following this, the files of interest for viewing these pages offline can
    be found in the ``/data/data/com.android.chrome/app_chrome/Default/Offline
    Pages`` directory. The ``OfflinePages.db`` file can be copied from the
    'metadata' subdirectory, while the ``*.mhtml`` files that should make up
    the ``pages.tar`` file can be found in the 'archives' subdirectory. These
    page files can then be archived to produce a tarball using a command such
    as ``tar -cvf pages.tar -C /path/to/archives .``.
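
    Equivalently, ``pages.tar`` can be produced with Python's standard
    library; a minimal sketch, assuming the 'archives' directory has already
    been pulled from the device to the illustrative path below::

        import tarfile
        from pathlib import Path

        archives = Path('/path/to/archives')        # pulled from the device
        with tarfile.open('pages.tar', 'w') as tar:  # uncompressed, like 'tar -cvf'
            for page in archives.glob('*.mhtml'):
                tar.add(page, arcname=page.name)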
    Both this archive and ``OfflinePages.db`` should then be placed in the
    ``~/.workload_automation/dependencies/chrome/`` directory on your local
    machine, creating this directory if it does not already exist.

    Known working APK version: 65.0.3325.109
    '''

    package_names = ['com.android.chrome']

    parameters = [
        Parameter('offline_mode', kind=bool, default=False,
                  description='''
                  If set to ``True``, the workload will execute in offline
                  mode. This mode requires root and makes use of a tarball of
                  \*.mhtml files 'pages.tar' and a metadata database
                  'OfflinePages.db'. The tarball is extracted directly to the
                  application's offline pages 'archives' directory, while the
                  database is copied to the offline pages 'metadata'
                  directory.
                  '''),
    ]

    @property
    def requires_network(self):
        return not self.offline_mode

    @property
    def requires_rerun(self):
        # In offline mode we need to restart the application after modifying
        # its data directory
        return self.offline_mode

    def __init__(self, target, **kwargs):
        super(Chrome, self).__init__(target, **kwargs)
        if self.offline_mode:
            self.deployable_assets = ['pages.tar', 'OfflinePages.db']
            self.cleanup_assets = True

    def initialize(self, context):
        super(Chrome, self).initialize(context)
        if self.offline_mode and not self.target.is_rooted:
            raise WorkloadError('This workload requires root to set up Chrome for offline usage.')

    def setup_rerun(self):
        super(Chrome, self).setup_rerun()
        offline_pages = self.target.path.join(self.target.package_data_directory,
                                              self.package, 'app_chrome', 'Default', 'Offline\ Pages')
        metadata_src = self.target.path.join(self.target.working_directory, 'OfflinePages.db')
        metadata_dst = self.target.path.join(offline_pages, 'metadata')
        archives_src = self.target.path.join(self.target.working_directory, 'pages.tar')
        archives_dst = self.target.path.join(offline_pages, 'archives')
        owner = self.target.execute("{} stat -c '%u' {}".format(self.target.busybox, offline_pages), as_root=True).strip()
        self.target.execute('{} tar -xvf {} -C {}'.format(self.target.busybox, archives_src, archives_dst), as_root=True)
        self.target.execute('{} cp {} {}'.format(self.target.busybox, metadata_src, metadata_dst), as_root=True)
        self.target.execute('{0} chown -R {1}:{1} {2}'.format(self.target.busybox, owner, offline_pages), as_root=True)

================================================ FILE: wa/workloads/chrome/uiauto/app/build.gradle ================================================
apply plugin: 'com.android.application'

android {
    compileSdkVersion 18
    buildToolsVersion '25.0.0'
    defaultConfig {
        applicationId "com.arm.wa.uiauto.chrome"
        minSdkVersion 18
        targetSdkVersion 28
        testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
    }
    buildTypes {
        applicationVariants.all { variant ->
            variant.outputs.each { output ->
                output.outputFileName = "com.arm.wa.uiauto.chrome.apk"
            }
        }
    }
}

dependencies {
    implementation fileTree(include: ['*.jar'], dir: 'libs')
    implementation 'com.android.support.test:runner:0.5'
    implementation 'com.android.support.test:rules:0.5'
    implementation 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'
    implementation(name: 'uiauto', ext: 'aar')
}

repositories {
    flatDir {
        dirs 'libs'
    }
}

================================================ FILE: wa/workloads/chrome/uiauto/app/src/main/AndroidManifest.xml ================================================

================================================ FILE: wa/workloads/chrome/uiauto/app/src/main/java/com/arm/wa/uiauto/UiAutomation.java ================================================
/* Copyright 2018 ARM Limited
 *
 *
Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.arm.wa.uiauto.chrome; import android.app.Activity; import android.os.Bundle; import org.junit.Test; import org.junit.runner.RunWith; import android.support.test.runner.AndroidJUnit4; import android.util.Log; import android.view.KeyEvent; import android.support.test.uiautomator.UiObject; import android.support.test.uiautomator.UiObjectNotFoundException; import android.support.test.uiautomator.UiScrollable; import android.support.test.uiautomator.UiSelector; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import com.arm.wa.uiauto.ApplaunchInterface; import com.arm.wa.uiauto.BaseUiAutomation; import com.arm.wa.uiauto.UiAutoUtils; @RunWith(AndroidJUnit4.class) public class UiAutomation extends BaseUiAutomation implements ApplaunchInterface { protected Bundle parameters; protected String packageID; public static String TAG = "chrome"; @Before public void initialize() throws Exception { parameters = getParams(); packageID = getPackageID(parameters); } @Test public void setup() throws Exception { setScreenOrientation(ScreenOrientation.NATURAL); runApplicationSetup(); } public void navigateToPage(String url, boolean from_new_tab) throws Exception { UiObject searchBar, urlBar; if (from_new_tab) { // On the new tab page, click on the search box to turn it into a url bar searchBar = mDevice.findObject(new UiSelector().resourceId(packageID + "search_box_text") .className("android.widget.EditText")); searchBar.click(); } // Navigate to the specified URL urlBar = mDevice.findObject(new UiSelector().resourceId(packageID + "url_bar") .className("android.widget.EditText")); urlBar.click(); urlBar.setText(url); pressEnter(); } public void newTab() throws Exception { UiObject tabSwitcher, newTab; // Activate the tab switcher tabSwitcher = mDevice.findObject(new UiSelector().resourceId(packageID + "tab_switcher_button") .className("android.widget.ImageButton")); if (tabSwitcher.exists()){ tabSwitcher.clickAndWaitForNewWindow(uiAutoTimeout); // Click the New Tab button newTab = mDevice.findObject(new UiSelector().resourceId(packageID + "new_tab_button") .className("android.widget.Button")); newTab.clickAndWaitForNewWindow(uiAutoTimeout); } // Support Tablet devices which do not have tab switcher else { UiObject menu_button = mDevice.findObject(new UiSelector().resourceId(packageID + "menu_button") .className("android.widget.ImageButton")); menu_button.click(); newTab = mDevice.findObject(new UiSelector().resourceId(packageID + "menu_item_text") .textContains("New tab")); newTab.click(); } } public void followTextLink(String text) throws Exception { UiObject link = mDevice.findObject(new UiSelector().text(text).clickable(true)); link.waitForExists(uiAutoTimeout); link.clickAndWaitForNewWindow(); } @Test public void runWorkload() throws Exception { // Initial browsing within a single tab navigateToPage("https://en.m.wikipedia.org/wiki/Main_Page", true); uiDeviceSwipeUp(100); sleep(1); uiDeviceSwipeUp(100); sleep(1); uiDeviceSwipeUp(250); 
    @Test
    public void runWorkload() throws Exception {
        // Initial browsing within a single tab
        navigateToPage("https://en.m.wikipedia.org/wiki/Main_Page", true);
        uiDeviceSwipeUp(100);
        sleep(1);
        uiDeviceSwipeUp(100);
        sleep(1);
        uiDeviceSwipeUp(250);
        sleep(1);
        uiDeviceSwipeDown(100);
        navigateToPage("https://en.m.wikipedia.org/wiki/United_States", false);
        uiDeviceSwipeUp(100);
        sleep(1);
        uiDeviceSwipeUp(250);
        sleep(1);
        uiDeviceSwipeDown(100);

        // URL entry and link navigation within a new tab
        newTab();
        navigateToPage("https://en.m.wikipedia.org/wiki/California", true);
        sleep(2);
        followTextLink("United States");
        uiDeviceSwipeDown(50);
        sleep(1);
        uiDeviceSwipeUp(10);
        sleep(3);

        // Pinch to zoom, scroll around
        UiObject webView = mDevice.findObject(new UiSelector().className("android.webkit.WebView"));
        uiObjectVertPinchOut(webView, 100, 50);
        uiDeviceSwipeUp(300);
        sleep(1);
        uiObjectVertPinchIn(webView, 100, 50);
        uiDeviceSwipeUp(100);
        sleep(1);
        uiDeviceSwipeUp(100);
        sleep(3);

        // Go back a page
        pressBack();
    }

    @Test
    public void teardown() throws Exception {
        unsetScreenOrientation();
    }

    public void runApplicationSetup() throws Exception {
        UiObject sendReportBox;
        UiObject acceptButton, noThanksButton;

        // Dismiss the first-run dialogs: opt out of usage reports, accept the
        // terms, and decline the sign-in prompt.
        sendReportBox = mDevice.findObject(new UiSelector().resourceId(packageID + "send_report_checkbox")
                                                           .className("android.widget.CheckBox"));
        sendReportBox.click();
        acceptButton = mDevice.findObject(new UiSelector().resourceId(packageID + "terms_accept")
                                                          .className("android.widget.Button"));
        acceptButton.clickAndWaitForNewWindow(uiAutoTimeout);
        noThanksButton = mDevice.findObject(new UiSelector().resourceId(packageID + "negative_button")
                                                            .className("android.widget.Button"));
        noThanksButton.clickAndWaitForNewWindow(uiAutoTimeout);
    }

    public UiObject getLaunchEndObject() {
        UiObject launchEndObject = mDevice.findObject(new UiSelector().className("android.widget.EditText"));
        return launchEndObject;
    }

    public String getLaunchCommand() {
        String launch_command = UiAutoUtils.createLaunchCommand(parameters);
        return launch_command;
    }

    public void setWorkloadParameters(Bundle workload_parameters) {
        parameters = workload_parameters;
        packageID = getPackageID(parameters);
    }
}
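The Java class above is only the on-device half of the workload; WA pairs it with a host-side Python plugin of the same name. A minimal sketch of that pairing, assuming the ApkUiautoWorkload base class that the wa package exports -- the class body below is illustrative, not a copy of the actual wa/workloads/chrome/__init__.py:

# Minimal sketch, assuming WA's ApkUiautoWorkload base class; the real
# chrome plugin may declare additional parameters and package names.
from wa import ApkUiautoWorkload


class Chrome(ApkUiautoWorkload):

    name = 'chrome'  # must match the uiauto package suffix com.arm.wa.uiauto.chrome
    package_names = ['com.android.chrome']  # illustrative candidate APK package
    description = '''
    Browses a handful of Wikipedia pages in Chrome, exercising tab creation,
    link navigation, and pinch-zoom (driven by the UiAutomation class above).
    '''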
================================================
FILE: wa/workloads/chrome/uiauto/build.gradle
================================================
// Top-level build file where you can add configuration options common to all sub-projects/modules.

buildscript {
    repositories {
        jcenter()
        google()
    }
    dependencies {
        classpath 'com.android.tools.build:gradle:7.2.1'

        // NOTE: Do not place your application dependencies here; they belong
        // in the individual module build.gradle files
    }
}

allprojects {
    repositories {
        jcenter()
        google()
    }
}

task clean(type: Delete) {
    delete rootProject.buildDir
}


================================================
FILE: wa/workloads/chrome/uiauto/build.sh
================================================
#!/bin/bash
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# CD into build dir if possible - allows building from any directory
script_path='.'
if `readlink -f $0 &>/dev/null`; then
    script_path=`readlink -f $0 2>/dev/null`
fi
script_dir=`dirname $script_path`
cd $script_dir

# Ensure gradlew exists before starting
if [[ ! -f gradlew ]]; then
    echo 'gradlew file not found! Check that you are in the right directory.'
    exit 9
fi

# Copy base class library from wlauto dist
libs_dir=app/libs
base_class=`python3 -c "import os, wa; print(os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar'))"`
mkdir -p $libs_dir
cp $base_class $libs_dir

# Build and return appropriate exit code if failed
# gradle build
./gradlew clean :app:assembleDebug
exit_code=$?
if [[ $exit_code -ne 0 ]]; then
    echo "ERROR: 'gradle build' exited with code $exit_code"
    exit $exit_code
fi

# If successful move APK file to workload folder (overwrite previous)
rm -f ../com.arm.wa.uiauto.chrome.apk
if [[ -f app/build/outputs/apk/debug/com.arm.wa.uiauto.chrome.apk ]]; then
    cp app/build/outputs/apk/debug/com.arm.wa.uiauto.chrome.apk ../com.arm.wa.uiauto.chrome.apk
else
    echo 'ERROR: UiAutomator apk could not be found!'
    exit 9
fi


================================================
FILE: wa/workloads/chrome/uiauto/gradle/wrapper/gradle-wrapper.properties
================================================
#Wed May 03 15:42:44 BST 2017
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip


================================================
FILE: wa/workloads/chrome/uiauto/gradlew
================================================
#!/usr/bin/env bash

##############################################################################
##
##  Gradle start up script for UN*X
##
##############################################################################

# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""

APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`

# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"

warn ( ) {
    echo "$*"
}

die ( ) {
    echo
    echo "$*"
    echo
    exit 1
}

# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
case "`uname`" in
  CYGWIN* )
    cygwin=true
    ;;
  Darwin* )
    darwin=true
    ;;
  MINGW* )
    msys=true
    ;;
esac

# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
    ls=`ls -ld "$PRG"`
    link=`expr "$ls" : '.*-> \(.*\)$'`
    if expr "$link" : '/.*' > /dev/null; then
        PRG="$link"
    else
        PRG=`dirname "$PRG"`"/$link"
    fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null

CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar

# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
        # IBM's JDK on AIX uses strange locations for the executables
        JAVACMD="$JAVA_HOME/jre/sh/java"
    else
        JAVACMD="$JAVA_HOME/bin/java"
    fi
    if [ ! -x "$JAVACMD" ] ; then
        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
    fi
else
    JAVACMD="java"
    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi

# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
    MAX_FD_LIMIT=`ulimit -H -n`
    if [ $? -eq 0 ] ; then
        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
            MAX_FD="$MAX_FD_LIMIT"
        fi
        ulimit -n $MAX_FD
        if [ $? -ne 0 ] ; then
            warn "Could not set maximum file descriptor limit: $MAX_FD"
        fi
    else
        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
    fi
fi

# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi

# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
    JAVACMD=`cygpath --unix "$JAVACMD"`

    # We build the pattern for arguments to be converted via cygpath
    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
    SEP=""
    for dir in $ROOTDIRSRAW ; do
        ROOTDIRS="$ROOTDIRS$SEP$dir"
        SEP="|"
    done
    OURCYGPATTERN="(^($ROOTDIRS))"
    # Add a user-defined pattern to the cygpath arguments
    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
    fi
    # Now convert the arguments - kludge to limit ourselves to /bin/sh
    i=0
    for arg in "$@" ; do
        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
        CHECK2=`echo "$arg"|egrep -c "^-"`    ### Determine if an option

        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then    ### Added a condition
            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
        else
            eval `echo args$i`="\"$arg\""
        fi
        i=$((i+1))
    done
    case $i in
        (0) set -- ;;
        (1) set -- "$args0" ;;
        (2) set -- "$args0" "$args1" ;;
        (3) set -- "$args0" "$args1" "$args2" ;;
        (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
        (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
        (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
        (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
        (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
        (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
    esac
fi

# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
function splitJvmOpts() {
    JVM_OPTS=("$@")
}
eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"

exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"


================================================
FILE: wa/workloads/chrome/uiauto/gradlew.bat
================================================
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem  Gradle startup script for Windows
@rem
@rem ##########################################################################

@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal

@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=

set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%

@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome

set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init

echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe

if exist "%JAVA_EXE%" goto init

echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:init
@rem Get command-line arguments, handling Windows variants

if not "%OS%" == "Windows_NT" goto win9xME_args
if "%@eval[2+2]" == "4" goto 4NT_args

:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2

:win9xME_args_slurp
if "x%~1" == "x" goto execute

set CMD_LINE_ARGS=%*
goto execute

:4NT_args
@rem Get arguments from the 4NT Shell from JP Software
set CMD_LINE_ARGS=%$

:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar

@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%

:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd

:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1

:mainEnd
if "%OS%"=="Windows_NT" endlocal

:omega


================================================
FILE: wa/workloads/chrome/uiauto/settings.gradle
================================================
include ':app'
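With the automation class, build scripts, and Gradle wrapper in place, the chrome workload runs like any other WA plugin, and its results can be inspected offline afterwards. A minimal sketch, assuming WA's documented output API (RunOutput is exported from the wa package); the ./wa_output directory below is a hypothetical results directory from a previous `wa run`:

# Minimal sketch, assuming WA's documented output API; './wa_output' is a
# hypothetical output directory produced by an earlier `wa run`.
from wa import RunOutput

run_output = RunOutput('./wa_output')
for job in run_output.jobs:
    if job.label == 'chrome':
        print(job.id, job.status)
        for metric in job.metrics:
            # e.g. frame-rate metrics, if the fps instrument was enabled
            print('  {}: {} {}'.format(metric.name, metric.value, metric.units))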

================================================
FILE: wa/workloads/deepbench/__init__.py
================================================
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# pylint: disable=E1101,W0201

import os
import re

import pandas as pd

from wa import Workload, Parameter, Alias, Executable
from wa.utils.types import numeric


class Deepbench(Workload):

    name = 'deepbench'
    description = """
    Benchmarks operations that are important to deep learning, including GEMM
    and convolution.

    The benchmark and its documentation are available here:

        https://github.com/baidu-research/DeepBench

    .. note:: parameters of matrices used in each sub-test are added as
              classifiers to the metrics. See the benchmark documentation for
              the explanation of the various parameters

    .. note:: at the moment only the "Arm Benchmarks" subset of DeepBench is
              supported.

    """

    parameters = [
        Parameter('test', default='gemm',
                  allowed_values=['gemm', 'conv', 'sparse'],
                  description='''
                  Specifies which of the available benchmarks will be run.

                  gemm
                      Performs GEneral Matrix Multiplication of dense matrices
                      of varying sizes.

                  conv
                      Performs convolutions on inputs in NCHW format.

                  sparse
                      Performs GEneral Matrix Multiplication of sparse matrices
                      of varying sizes, and compares them to corresponding
                      dense operations.
                  '''),
    ]

    aliases = [
        Alias('deep-gemm', test='gemm'),
        Alias('deep-conv', test='conv'),
        Alias('deep-sparse', test='sparse'),
    ]

    test_metrics = {
        'gemm': ['time (msec)', 'GOPS'],
        'conv': ['fwd_time (usec)'],
        'sparse': ['sparse time (usec)', 'dense time (usec)', 'speedup'],
    }

    lower_is_better = {
        'time (msec)': True,
        'GOPS': False,
        'fwd_time (usec)': True,
        'sparse time (usec)': True,
        'dense time (usec)': True,
        'speedup': False,
    }

    installed = {}

    def initialize(self, context):
        self.exe_name = '{}_bench'.format(self.test)
        if self.exe_name not in self.installed:
            resource = Executable(self, self.target.abi, self.exe_name)
            host_exe = context.get_resource(resource)
            self.target.killall(self.exe_name)
            self.installed[self.exe_name] = self.target.install(host_exe)
        self.target_exe = self.installed[self.exe_name]

    def setup(self, context):
        self.target.killall(self.exe_name)

    def run(self, context):
        self.output = None
        try:
            timeout = 10800
            self.output = self.target.execute(self.target_exe, timeout=timeout)
        except KeyboardInterrupt:
            self.target.killall(self.exe_name)
            raise

    def extract_results(self, context):
        if self.output:
            outfile = os.path.join(context.output_directory, '{}.output'.format(self.test))
            with open(outfile, 'w') as wfh:
                wfh.write(self.output)
            context.add_artifact('deepbench-output', outfile, 'raw', "deepbench's stdout")

    def update_output(self, context):
        raw_file = context.get_artifact_path('deepbench-output')
        if not raw_file:
            return
        table = read_result_table(raw_file)
        for _, row in table.iterrows():
            items = dict(row)
            metrics = []
            for metric_name in self.test_metrics[self.test]:
                metrics.append((metric_name, items.pop(metric_name)))
            for name, value in metrics:
                context.add_metric(name, value,
                                   lower_is_better=self.lower_is_better[name],
                                   classifiers=items)

    def finalize(self, context):
        if self.cleanup_assets:
            if self.exe_name in self.installed:
                self.target.uninstall(self.exe_name)
                del self.installed[self.exe_name]


def numeric_best_effort(value):
    try:
        return numeric(value)
    except ValueError:
        return value


def read_result_table(filepath):
    columns = []
    entries = []
    with open(filepath) as fh:
        try:
            # fast-forward to the header
            line = next(fh)
            while not line.startswith('----'):
                line = next(fh)

            header_line = next(fh)
            header_sep = re.compile(r'(?<=[) ]) ')  # Since headers can contain spaces, use two spaces as column separator
            parts = [p.strip() for p in header_sep.split(header_line)]
            columns = [p for p in parts if p]

            line = next(fh)
            while line.strip():
                if line.startswith('----'):
                    line = next(fh)
                row = [numeric_best_effort(i) for i in line.strip().split()]
                entries.append(row)
                line = next(fh)
        except StopIteration:
            pass

    return pd.DataFrame(entries, columns=columns)
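The two-space column separator in read_result_table() is the subtle part: DeepBench header names such as `time (msec)` contain single spaces, so splitting the header on arbitrary whitespace would shred them. Instead the regex splits on a space that follows a ')' or another space. A standalone illustration of the same pattern, using a hypothetical header line:

import re

# Hypothetical DeepBench-style header; note the single space *inside* 'time (msec)'.
header_line = '   m       n      k      a_t     b_t    time (msec)     GOPS  '
header_sep = re.compile(r'(?<=[) ]) ')  # split on a space preceded by ')' or another space
parts = [p.strip() for p in header_sep.split(header_line)]
print([p for p in parts if p])
# ['m', 'n', 'k', 'a_t', 'b_t', 'time (msec)', 'GOPS']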

================================================
FILE: wa/workloads/dhrystone/__init__.py
================================================
# Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# pylint: disable=E1101,W0201

import os
import re

from wa import Workload, Parameter, ConfigError, Executable
from wa.utils.exec_control import once
from wa.utils.types import cpu_mask


class Dhrystone(Workload):

    name = 'dhrystone'
    description = """
    Runs the Dhrystone benchmark.

    Original source from::

        http://classes.soe.ucsc.edu/cmpe202/benchmarks/standard/dhrystone.c

    This version has been modified to configure duration and the number of
    threads used.

    """

    bm_regex = re.compile(r'This machine benchmarks at (?P<score>\d+)')
    dmips_regex = re.compile(r'(?P<score>\d+) DMIPS')
    time_regex = re.compile(r'Total dhrystone run time: (?P